vendor: make vendor-update

Aliaksandr Valialkin 2023-02-01 12:23:23 -08:00
parent 8b9ebf625a
commit 607b542222
62 changed files with 3259 additions and 846 deletions


@@ -102,6 +102,7 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
 	if err != nil {
 		return fmt.Errorf("failed to read block: %s", err)
 	}
+	var it chunkenc.Iterator
 	for ss.Next() {
 		var name string
 		var labels []vm.LabelPair
@@ -123,7 +124,7 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
 		var timestamps []int64
 		var values []float64
-		it := series.Iterator()
+		it = series.Iterator(it)
 		for {
 			typ := it.Next()
 			if typ == chunkenc.ValNone {
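These hunks adapt vmctl to the iterator API change shipped in Prometheus v0.42: storage.Series.Iterator now accepts a previous iterator to recycle, so a single chunkenc.Iterator can be hoisted out of the series loop and reused instead of being allocated per series. A minimal sketch of the pattern (the drain helper and its SeriesSet argument are illustrative, not part of this commit):

package example

import (
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// drain walks every sample in ss, reusing a single iterator across series.
func drain(ss storage.SeriesSet) error {
	var it chunkenc.Iterator
	for ss.Next() {
		// Passing the previous iterator lets the implementation reuse its allocations.
		it = ss.At().Iterator(it)
		for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
			if typ == chunkenc.ValFloat {
				_, _ = it.At() // timestamp and value
			}
		}
		if err := it.Err(); err != nil {
			return err
		}
	}
	return ss.Err()
}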


@@ -15,6 +15,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage/remote"
+	"github.com/prometheus/prometheus/tsdb/chunks"
 )

 const (
@@ -154,7 +155,7 @@ func (rrs *RemoteReadServer) getStreamReadHandler(t *testing.T) http.Handler {
 		t.Fatalf("error unmarshal read request: %s", err)
 	}
-	var chunks []prompb.Chunk
+	var chks []prompb.Chunk
 	ctx := context.Background()
 	for idx, r := range req.Queries {
 		startTs := r.StartTimestampMs
@@ -171,9 +172,10 @@ func (rrs *RemoteReadServer) getStreamReadHandler(t *testing.T) http.Handler {
 		}
 		ss := q.Select(false, nil, matchers...)
+		var iter chunks.Iterator
 		for ss.Next() {
 			series := ss.At()
-			iter := series.Iterator()
+			iter = series.Iterator(iter)
 			labels := remote.MergeLabels(labelsToLabelsProto(series.Labels()), nil)
 			frameBytesLeft := maxBytesInFrame
@@ -190,14 +192,14 @@ func (rrs *RemoteReadServer) getStreamReadHandler(t *testing.T) http.Handler {
 				t.Fatalf("error found not populated chunk returned by SeriesSet at ref: %v", chunk.Ref)
 			}
-			chunks = append(chunks, prompb.Chunk{
+			chks = append(chks, prompb.Chunk{
 				MinTimeMs: chunk.MinTime,
 				MaxTimeMs: chunk.MaxTime,
 				Type:      prompb.Chunk_Encoding(chunk.Chunk.Encoding()),
 				Data:      chunk.Chunk.Bytes(),
 			})
-			frameBytesLeft -= chunks[len(chunks)-1].Size()
+			frameBytesLeft -= chks[len(chks)-1].Size()
 			// We are fine with minor inaccuracy of max bytes per frame. The inaccuracy will be max of full chunk size.
 			isNext = iter.Next()
@@ -207,7 +209,7 @@ func (rrs *RemoteReadServer) getStreamReadHandler(t *testing.T) http.Handler {
 			resp := &prompb.ChunkedReadResponse{
 				ChunkedSeries: []*prompb.ChunkedSeries{
-					{Labels: labels, Chunks: chunks},
+					{Labels: labels, Chunks: chks},
 				},
 				QueryIndex: int64(idx),
 			}
@@ -220,7 +222,7 @@ func (rrs *RemoteReadServer) getStreamReadHandler(t *testing.T) http.Handler {
 			if _, err := stream.Write(b); err != nil {
 				t.Fatalf("error write to stream: %s", err)
 			}
-			chunks = chunks[:0]
+			chks = chks[:0]
 			rrs.storage.Reset()
 		}
 		if err := iter.Err(); err != nil {
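The same reuse pattern applies to chunk-level iteration in this stream-read test server, and the local slice is renamed from chunks to chks so it no longer shadows the newly imported tsdb/chunks package. A hedged sketch of chunk iteration with a reused iterator (drainChunks is illustrative):

package example

import (
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunks"
)

// drainChunks counts chunks across all series, reusing one chunks.Iterator.
func drainChunks(css storage.ChunkSeriesSet) (int, error) {
	var iter chunks.Iterator
	n := 0
	for css.Next() {
		iter = css.At().Iterator(iter)
		for iter.Next() {
			n++ // iter.At() yields the current chunks.Meta
		}
		if err := iter.Err(); err != nil {
			return n, err
		}
	}
	return n, css.Err()
}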

go.mod

@@ -14,8 +14,8 @@ require (
 	github.com/VictoriaMetrics/metrics v1.23.1
 	github.com/VictoriaMetrics/metricsql v0.51.2
 	github.com/aws/aws-sdk-go-v2 v1.17.3
-	github.com/aws/aws-sdk-go-v2/config v1.18.10
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.49
+	github.com/aws/aws-sdk-go-v2/config v1.18.11
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.50
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.30.1
 	github.com/cespare/xxhash/v2 v2.2.0
 	github.com/cheggaaa/pb/v3 v3.1.0
@@ -24,7 +24,7 @@ require (
 	github.com/googleapis/gax-go/v2 v2.7.0
 	github.com/influxdata/influxdb v1.11.0
 	github.com/klauspost/compress v1.15.15
-	github.com/prometheus/prometheus v0.41.0
+	github.com/prometheus/prometheus v0.42.0
 	github.com/urfave/cli/v2 v2.24.2
 	github.com/valyala/fastjson v1.6.4
 	github.com/valyala/fastrand v1.1.0
@@ -47,9 +47,9 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/aws/aws-sdk-go v1.44.190 // indirect
+	github.com/aws/aws-sdk-go v1.44.192 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.13.10 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.13.11 // indirect
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 // indirect
@@ -112,7 +112,7 @@ require (
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa // indirect
+	google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 // indirect
 	google.golang.org/grpc v1.52.3 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum

@@ -52,7 +52,7 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1 h1:YvQv9Mz6T8oR5ypQO
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
-github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk=
+github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc=
 github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
 github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
@@ -87,20 +87,20 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
 github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.190 h1:QC+Pf/Ooj7Waf2obOPZbIQOqr00hy4h54j3ZK9mvHcc=
-github.com/aws/aws-sdk-go v1.44.190/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM=
+github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY=
 github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
-github.com/aws/aws-sdk-go-v2/config v1.18.10 h1:Znce11DWswdh+5kOsIp+QaNfY9igp1QUN+fZHCKmeCI=
-github.com/aws/aws-sdk-go-v2/config v1.18.10/go.mod h1:VATKco+pl+Qe1WW+RzvZTlPPe/09Gg9+vM0ZXsqb16k=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.10 h1:T4Y39IhelTLg1f3xiKJssThnFxsndS8B6OnmcXtKK+8=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.10/go.mod h1:tqAm4JmQaShel+Qi38hmd1QglSnnxaYt50k/9yGQzzc=
+github.com/aws/aws-sdk-go-v2/config v1.18.11 h1:7dJD4p90OyKYIihuwe/LbHfP7uw4yVm5P1hel+b8UZ8=
+github.com/aws/aws-sdk-go-v2/config v1.18.11/go.mod h1:FTGKr2F7QL7IAg22dUmEB5NWpLPAOuhrONzXe7TVhAI=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.11 h1:QnvlTut1XXKkX4aaM1Ydo5X0CHriv0jmLu8PTVQQJJo=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.11/go.mod h1:tqAm4JmQaShel+Qi38hmd1QglSnnxaYt50k/9yGQzzc=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 h1:j9wi1kQ8b+e0FBVHxCqCGo4kxDU175hoDHcWAi0sauU=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21/go.mod h1:ugwW57Z5Z48bpvUyZuaPy4Kv+vEfJWnIrky7RmkBvJg=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.49 h1:zPFhadkmXbXu3RVXTPU4HVW+g2DStMY+01cJaj//+Cw=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.49/go.mod h1:N9gSChQkKpdAj7vRpfKma4ND88zoZM+v6W2lJgWrDh4=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.50 h1:ATgzvd5DaU0Evx7yvaUw2ftwiWDGnDN59zowPF3jDk0=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.50/go.mod h1:naA7bah2/dpvwlyWysZ7yaAYI1Ti73HPaDyGryfJuiU=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 h1:I3cakv2Uy1vNmmhRQmFptYDxOvBnwCdNwyw63N0RaRU=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 h1:5NbbMrIzmUn/TXFqAle6mgrH5m9cOvMLRGL7pnG8tRE=
@@ -151,10 +151,10 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.91.1 h1:1o30VOCu1aC6488qBd0SkQiBeAZ35RSTvLwCA1pQMhc=
+github.com/digitalocean/godo v1.95.0 h1:S48/byPKui7RHZc1wYEPfRvkcEvToADNb5I3guu95xg=
 github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
-github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
+github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jmLhoaEcVEfK8dyA=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
@@ -191,7 +191,7 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
 github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
-github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU=
+github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
 github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
@@ -279,17 +279,19 @@ github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD
 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
 github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g=
 github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d h1:kEWrUx7mld3c6HRcO2KhfD1MYBkofuZfEfDwCRQ9aMU=
+github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
+github.com/hashicorp/nomad/api v0.0.0-20230124213148-69fd1a0e4bf7 h1:XOdd3JHyeQnBRxotBo9ibxBFiYGuYhQU25s/YeV2cTU=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
-github.com/hetznercloud/hcloud-go v1.38.0 h1:K6Pd/mMdcLfBhvwG39qyAaacp4pCS3dKa8gChmLKxLg=
+github.com/hetznercloud/hcloud-go v1.39.0 h1:RUlzI458nGnPR6dlcZlrsGXYC1hQlFbKdm8tVtEQQB0=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
 github.com/influxdata/influxdb v1.11.0 h1:0X+ZsbcOWc6AEi5MHee9BYqXCKmz8IZsljrRYjmV8Qg=
@@ -326,7 +328,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
-github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ=
+github.com/linode/linodego v1.12.0 h1:33mOIrZ+gVva14gyJMKPZ85mQGovAvZCEP1ftgmFBjA=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@@ -392,8 +394,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
 github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/prometheus/prometheus v0.41.0 h1:+QR4QpzwE54zsKk2K7EUkof3tHxa3b/fyw7xJ4jR1Ns=
-github.com/prometheus/prometheus v0.41.0/go.mod h1:Uu5817xm7ibU/VaDZ9pu1ssGzcpO9Bd+LyoZ76RpHyo=
+github.com/prometheus/prometheus v0.42.0 h1:G769v8covTkOiNckXFIwLx01XE04OE6Fr0JPA0oR2nI=
+github.com/prometheus/prometheus v0.42.0/go.mod h1:Pfqb/MLnnR2KK+0vchiaH39jXxvLMBk+3lnIGP4N7Vk=
 github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
@@ -401,7 +403,7 @@ github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12 h1:Aaz4T7dZp7cB2cv7D/tGtRdSMh48sRaDYr7Jh0HV4qQ=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -675,7 +677,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
+golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -737,8 +739,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
 google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa h1:GZXdWYIKckxQE2EcLHLvF+KLF+bIwoxGdMUxTZizueg=
-google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 h1:p0kMzw6AG0JEzd7Z+kXqOiLhC6gjUQTbtS2zR0Q3DbI=
+google.golang.org/genproto v0.0.0-20230131230820-1c016267d619/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -794,9 +796,9 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I=
-k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg=
-k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8=
+k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ=
+k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ=
+k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
 k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s=


@@ -1,3 +1,7 @@
+# v1.18.11 (2023-02-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.18.10 (2023-01-25)
 * **Dependency Update**: Updated to the latest SDK module versions


@@ -3,4 +3,4 @@
 package config

 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.18.10"
+const goModuleVersion = "1.18.11"


@@ -1,3 +1,7 @@
+# v1.13.11 (2023-02-01)
+
+* No change notes available for this release.
+
 # v1.13.10 (2023-01-25)
 * **Dependency Update**: Updated to the latest SDK module versions


@@ -11,7 +11,7 @@
 // # Loading credentials with the SDK's AWS Config
 //
 // The EC2 Instance role credentials provider will automatically be the resolved
-// credential provider int he credential chain if no other credential provider is
+// credential provider in the credential chain if no other credential provider is
 // resolved first.
 //
 // To explicitly instruct the SDK's credentials resolving to use the EC2 Instance


@@ -3,4 +3,4 @@
 package credentials

 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.10"
+const goModuleVersion = "1.13.11"


@@ -1,3 +1,7 @@
+# v1.11.50 (2023-02-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.11.49 (2023-01-25)
 * **Dependency Update**: Updated to the latest SDK module versions


@@ -3,4 +3,4 @@
 package manager

 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.11.49"
+const goModuleVersion = "1.11.50"


@@ -4938,6 +4938,76 @@ var awsPartition = partition{
             },
         },
     },
+    "cloudtrail-data": service{
+        Endpoints: serviceEndpoints{
+            endpointKey{
+                Region: "af-south-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "ap-east-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "ap-northeast-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "ap-northeast-2",
+            }: endpoint{},
+            endpointKey{
+                Region: "ap-northeast-3",
+            }: endpoint{},
+            endpointKey{
+                Region: "ap-south-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "ap-southeast-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "ap-southeast-2",
+            }: endpoint{},
+            endpointKey{
+                Region: "ap-southeast-3",
+            }: endpoint{},
+            endpointKey{
+                Region: "ca-central-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "eu-central-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "eu-north-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "eu-south-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "eu-west-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "eu-west-2",
+            }: endpoint{},
+            endpointKey{
+                Region: "eu-west-3",
+            }: endpoint{},
+            endpointKey{
+                Region: "me-south-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "sa-east-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "us-east-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "us-east-2",
+            }: endpoint{},
+            endpointKey{
+                Region: "us-west-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "us-west-2",
+            }: endpoint{},
+        },
+    },
     "codeartifact": service{
         Endpoints: serviceEndpoints{
             endpointKey{
@@ -11127,6 +11197,9 @@ var awsPartition = partition{
         }: endpoint{
             Hostname: "fms-fips.ap-southeast-2.amazonaws.com",
         },
+        endpointKey{
+            Region: "ap-southeast-3",
+        }: endpoint{},
         endpointKey{
             Region: "ca-central-1",
         }: endpoint{},
@@ -11355,6 +11428,9 @@ var awsPartition = partition{
             },
             Deprecated: boxedTrue,
         },
+        endpointKey{
+            Region: "me-central-1",
+        }: endpoint{},
         endpointKey{
             Region: "me-south-1",
         }: endpoint{},
@@ -12230,6 +12306,9 @@ var awsPartition = partition{
             },
             Deprecated: boxedTrue,
         },
+        endpointKey{
+            Region: "me-central-1",
+        }: endpoint{},
        endpointKey{
             Region: "me-south-1",
         }: endpoint{},
@@ -14225,16 +14304,16 @@ var awsPartition = partition{
         }: endpoint{
             Hostname: "kendra-ranking.ap-southeast-3.api.aws",
         },
+        endpointKey{
+            Region: "ap-southeast-4",
+        }: endpoint{
+            Hostname: "kendra-ranking.ap-southeast-4.api.aws",
+        },
         endpointKey{
             Region: "ca-central-1",
         }: endpoint{
             Hostname: "kendra-ranking.ca-central-1.api.aws",
         },
-        endpointKey{
-            Region: "eu-central-1",
-        }: endpoint{
-            Hostname: "kendra-ranking.eu-central-1.api.aws",
-        },
         endpointKey{
             Region: "eu-central-2",
         }: endpoint{
@@ -14260,11 +14339,6 @@ var awsPartition = partition{
         }: endpoint{
             Hostname: "kendra-ranking.eu-west-1.api.aws",
         },
-        endpointKey{
-            Region: "eu-west-2",
-        }: endpoint{
-            Hostname: "kendra-ranking.eu-west-2.api.aws",
-        },
         endpointKey{
             Region: "eu-west-3",
         }: endpoint{
@@ -31351,6 +31425,24 @@ var awsusgovPartition = partition{
             Region: "us-gov-east-1",
         },
     },
+    endpointKey{
+        Region:  "us-gov-east-1",
+        Variant: fipsVariant,
+    }: endpoint{
+        Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-east-1",
+        },
+    },
+    endpointKey{
+        Region: "us-gov-east-1-fips",
+    }: endpoint{
+        Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-east-1",
+        },
+        Deprecated: boxedTrue,
+    },
     endpointKey{
         Region: "us-gov-west-1",
     }: endpoint{
@@ -31359,6 +31451,24 @@ var awsusgovPartition = partition{
             Region: "us-gov-west-1",
         },
     },
+    endpointKey{
+        Region:  "us-gov-west-1",
+        Variant: fipsVariant,
+    }: endpoint{
+        Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-west-1",
+        },
+    },
+    endpointKey{
+        Region: "us-gov-west-1-fips",
+    }: endpoint{
+        Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-west-1",
+        },
+        Deprecated: boxedTrue,
+    },
     },
 },
 "cloudhsm": service{
@@ -33324,6 +33434,24 @@ var awsusgovPartition = partition{
 },
 "kinesis": service{
     Endpoints: serviceEndpoints{
+        endpointKey{
+            Region: "fips-us-gov-east-1",
+        }: endpoint{
+            Hostname: "kinesis.us-gov-east-1.amazonaws.com",
+            CredentialScope: credentialScope{
+                Region: "us-gov-east-1",
+            },
+            Deprecated: boxedTrue,
+        },
+        endpointKey{
+            Region: "fips-us-gov-west-1",
+        }: endpoint{
+            Hostname: "kinesis.us-gov-west-1.amazonaws.com",
+            CredentialScope: credentialScope{
+                Region: "us-gov-west-1",
+            },
+            Deprecated: boxedTrue,
+        },
         endpointKey{
             Region: "us-gov-east-1",
         }: endpoint{
@@ -33332,6 +33460,15 @@ var awsusgovPartition = partition{
                 Region: "us-gov-east-1",
             },
         },
+        endpointKey{
+            Region:  "us-gov-east-1",
+            Variant: fipsVariant,
+        }: endpoint{
+            Hostname: "kinesis.us-gov-east-1.amazonaws.com",
+            CredentialScope: credentialScope{
+                Region: "us-gov-east-1",
+            },
+        },
         endpointKey{
             Region: "us-gov-west-1",
         }: endpoint{
@@ -33340,6 +33477,15 @@ var awsusgovPartition = partition{
                 Region: "us-gov-west-1",
             },
         },
+        endpointKey{
+            Region:  "us-gov-west-1",
+            Variant: fipsVariant,
+        }: endpoint{
+            Hostname: "kinesis.us-gov-west-1.amazonaws.com",
+            CredentialScope: credentialScope{
+                Region: "us-gov-west-1",
+            },
+        },
     },
 },
 "kinesisanalytics": service{
@@ -34051,6 +34197,24 @@ var awsusgovPartition = partition{
             Region: "us-gov-east-1",
         },
     },
+    endpointKey{
+        Region:  "us-gov-east-1",
+        Variant: fipsVariant,
+    }: endpoint{
+        Hostname: "ram.us-gov-east-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-east-1",
+        },
+    },
+    endpointKey{
+        Region: "us-gov-east-1-fips",
+    }: endpoint{
+        Hostname: "ram.us-gov-east-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-east-1",
+        },
+        Deprecated: boxedTrue,
+    },
     endpointKey{
         Region: "us-gov-west-1",
     }: endpoint{
@@ -34059,6 +34223,24 @@ var awsusgovPartition = partition{
             Region: "us-gov-west-1",
         },
     },
+    endpointKey{
+        Region:  "us-gov-west-1",
+        Variant: fipsVariant,
+    }: endpoint{
+        Hostname: "ram.us-gov-west-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-west-1",
+        },
+    },
+    endpointKey{
+        Region: "us-gov-west-1-fips",
+    }: endpoint{
+        Hostname: "ram.us-gov-west-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-west-1",
+        },
+        Deprecated: boxedTrue,
+    },
     },
 },
 "rbin": service{
@@ -35440,6 +35622,24 @@ var awsusgovPartition = partition{
             Region: "us-gov-east-1",
         },
     },
+    endpointKey{
+        Region:  "us-gov-east-1",
+        Variant: fipsVariant,
+    }: endpoint{
+        Hostname: "swf.us-gov-east-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-east-1",
+        },
+    },
+    endpointKey{
+        Region: "us-gov-east-1-fips",
+    }: endpoint{
+        Hostname: "swf.us-gov-east-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-east-1",
+        },
+        Deprecated: boxedTrue,
+    },
     endpointKey{
         Region: "us-gov-west-1",
     }: endpoint{
@@ -35448,6 +35648,24 @@ var awsusgovPartition = partition{
             Region: "us-gov-west-1",
         },
     },
+    endpointKey{
+        Region:  "us-gov-west-1",
+        Variant: fipsVariant,
+    }: endpoint{
+        Hostname: "swf.us-gov-west-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-west-1",
+        },
+    },
+    endpointKey{
+        Region: "us-gov-west-1-fips",
+    }: endpoint{
+        Hostname: "swf.us-gov-west-1.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: "us-gov-west-1",
+        },
+        Deprecated: boxedTrue,
+    },
     },
 },
 "synthetics": service{


@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"

 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.190"
+const SDKVersion = "1.44.192"


@@ -80,7 +80,8 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
 		return cfg, nil
 	}
-	for i, v := range cfg.GlobalConfig.ExternalLabels {
+	b := labels.ScratchBuilder{}
+	cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) {
 		newV := os.Expand(v.Value, func(s string) string {
 			if s == "$" {
 				return "$"
@@ -93,10 +94,10 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
 		})
 		if newV != v.Value {
 			level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV)
-			v.Value = newV
-			cfg.GlobalConfig.ExternalLabels[i] = v
 		}
-	}
+		b.Add(v.Name, newV)
+	})
+	cfg.GlobalConfig.ExternalLabels = b.Labels()
 	return cfg, nil
 }
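In Prometheus v0.42, labels.Labels is meant to be treated as opaque: instead of mutating entries by index, Load now walks the external labels with Range and rebuilds them through a ScratchBuilder. A condensed sketch of the rewrite (the expand helper and its use of os.Getenv are illustrative):

package example

import (
	"os"

	"github.com/prometheus/prometheus/model/labels"
)

// expand returns a copy of lset with ${VAR} references in values expanded.
// Range visits labels in their stored (sorted) order, so no re-sort is needed.
func expand(lset labels.Labels) labels.Labels {
	b := labels.ScratchBuilder{}
	lset.Range(func(l labels.Label) {
		b.Add(l.Name, os.Expand(l.Value, os.Getenv))
	})
	return b.Labels()
}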
@@ -112,10 +113,6 @@ func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.
 	}
 	if agentMode {
-		if len(cfg.RemoteWriteConfigs) == 0 {
-			return nil, errors.New("at least one remote_write target must be specified in agent mode")
-		}
 		if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 {
 			return nil, errors.New("field alerting is not allowed in agent mode")
 		}
@@ -361,13 +358,16 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return err
 	}
-	for _, l := range gc.ExternalLabels {
+	if err := gc.ExternalLabels.Validate(func(l labels.Label) error {
 		if !model.LabelName(l.Name).IsValid() {
 			return fmt.Errorf("%q is not a valid label name", l.Name)
 		}
 		if !model.LabelValue(l.Value).IsValid() {
 			return fmt.Errorf("%q is not a valid label value", l.Value)
 		}
+		return nil
+	}); err != nil {
+		return err
 	}

 	// First set the correct scrape interval, then check that the timeout
@@ -394,7 +394,7 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // isZero returns true iff the global config is the zero value.
 func (c *GlobalConfig) isZero() bool {
-	return c.ExternalLabels == nil &&
+	return c.ExternalLabels.IsEmpty() &&
 		c.ScrapeInterval == 0 &&
 		c.ScrapeTimeout == 0 &&
 		c.EvaluationInterval == 0 &&
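Likewise, the inline validation loop becomes a callback passed to the new Labels.Validate helper, which stops at the first error. A minimal sketch (the checkLabels wrapper is illustrative):

package example

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
)

// checkLabels mirrors the rewritten UnmarshalYAML logic: the first invalid
// name or value aborts the walk and surfaces the error to the caller.
func checkLabels(ls labels.Labels) error {
	return ls.Validate(func(l labels.Label) error {
		if !model.LabelName(l.Name).IsValid() {
			return fmt.Errorf("%q is not a valid label name", l.Name)
		}
		if !model.LabelValue(l.Value).IsValid() {
			return fmt.Errorf("%q is not a valid label value", l.Value)
		}
		return nil
	})
}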


@@ -428,11 +428,11 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
 		}
 		typ := cfg.Name()
 		d, err := cfg.NewDiscoverer(DiscovererOptions{
-			Logger:            log.With(m.logger, "discovery", typ),
+			Logger:            log.With(m.logger, "discovery", typ, "config", setName),
 			HTTPClientOptions: m.httpOpts,
 		})
 		if err != nil {
-			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ)
+			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
 			failed++
 			return
 		}


@@ -27,6 +27,8 @@ import (
 // used to represent a histogram with integer counts and thus serves as a more
 // generalized representation.
 type FloatHistogram struct {
+	// Counter reset information.
+	CounterResetHint CounterResetHint
 	// Currently valid schema numbers are -4 <= n <= 8. They are all for
 	// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
 	// then each power of two is divided into 2^n logarithmic buckets. Or
@@ -244,6 +246,37 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
 	return h
 }
+
+// Equals returns true if the given float histogram matches exactly.
+// Exact match is when there are no new buckets (even empty) and no missing buckets,
+// and all the bucket values match. Spans can have different empty length spans in between,
+// but they must represent the same bucket layout to match.
+func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
+	if h2 == nil {
+		return false
+	}
+
+	if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
+		h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum {
+		return false
+	}
+
+	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
+		return false
+	}
+	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
+		return false
+	}
+
+	if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
+		return false
+	}
+	if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
+		return false
+	}
+
+	return true
+}
+
 // addBucket takes the "coordinates" of the last bucket that was handled and
 // adds the provided bucket after it. If a corresponding bucket exists, the
 // count is added. If not, the bucket is inserted. The updated slices and the
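The new Equals method gives exact comparison of two float histograms, reusing spansMatch and the now-generic bucketsMatch. A small usage sketch; the bucket layout below is invented for illustration, and Copy is assumed to be the existing FloatHistogram deep-copy helper:

package example

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// demoEquals exercises FloatHistogram.Equals with an illustrative layout.
func demoEquals() {
	h1 := &histogram.FloatHistogram{
		Schema:          0,
		Count:           4,
		Sum:             10,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 3},
	}
	h2 := h1.Copy()
	fmt.Println(h1.Equals(h2)) // true: identical layout and counts
	h2.Sum++
	fmt.Println(h1.Equals(h2)) // false: Sum differs
}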


@@ -25,14 +25,14 @@ type BucketCount interface {
 	float64 | uint64
 }

-// internalBucketCount is used internally by Histogram and FloatHistogram. The
+// InternalBucketCount is used internally by Histogram and FloatHistogram. The
 // difference to the BucketCount above is that Histogram internally uses deltas
 // between buckets rather than absolute counts (while FloatHistogram uses
 // absolute counts directly). Go type parameters don't allow type
 // specialization. Therefore, where special treatment of deltas between buckets
 // vs. absolute counts is important, this information has to be provided as a
 // separate boolean parameter "deltaBuckets"
-type internalBucketCount interface {
+type InternalBucketCount interface {
 	float64 | int64
 }
@@ -86,7 +86,7 @@ type BucketIterator[BC BucketCount] interface {
 // implementations, together with an implementation of the At method. This
 // iterator can be embedded in full implementations of BucketIterator to save on
 // code replication.
-type baseBucketIterator[BC BucketCount, IBC internalBucketCount] struct {
+type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {
 	schema  int32
 	spans   []Span
 	buckets []IBC
@@ -121,7 +121,7 @@ func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] {
 // compactBuckets is a generic function used by both Histogram.Compact and
 // FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are
 // deltas. Set it to false if the buckets contain absolute counts.
-func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) {
+func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) {
 	// Fast path: If there are no empty buckets AND no offset in any span is
 	// <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return
 	// immediately. We check that first because it's cheap and presumably
@@ -327,6 +327,18 @@ func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmp
 	return buckets, spans
 }

+func bucketsMatch[IBC InternalBucketCount](b1, b2 []IBC) bool {
+	if len(b1) != len(b2) {
+		return false
+	}
+	for i, b := range b1 {
+		if b != b2[i] {
+			return false
+		}
+	}
+	return true
+}
+
 func getBound(idx, schema int32) float64 {
 	// Here a bit of context about the behavior for the last bucket counting
 	// regular numbers (called simply "last bucket" below) and the bucket


@@ -19,6 +19,17 @@ import (
 	"strings"
 )

+// CounterResetHint contains the known information about a counter reset,
+// or alternatively that we are dealing with a gauge histogram, where counter resets do not apply.
+type CounterResetHint byte
+
+const (
+	UnknownCounterReset CounterResetHint = iota // UnknownCounterReset means we cannot say if this histogram signals a counter reset or not.
+	CounterReset                                // CounterReset means there was definitely a counter reset starting from this histogram.
+	NotCounterReset                             // NotCounterReset means there was definitely no counter reset with this histogram.
+	GaugeType                                   // GaugeType means this is a gauge histogram, where counter resets do not happen.
+)
+
 // Histogram encodes a sparse, high-resolution histogram. See the design
 // document for full details:
 // https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit#
@@ -35,6 +46,8 @@ import (
 //
 // Which bucket indices are actually used is determined by the spans.
 type Histogram struct {
+	// Counter reset information.
+	CounterResetHint CounterResetHint
 	// Currently valid schema numbers are -4 <= n <= 8. They are all for
 	// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
 	// then each power of two is divided into 2^n logarithmic buckets. Or
@@ -250,18 +263,6 @@ func allEmptySpans(s []Span) bool {
 	return true
 }

-func bucketsMatch(b1, b2 []int64) bool {
-	if len(b1) != len(b2) {
-		return false
-	}
-	for i, b := range b1 {
-		if b != b2[i] {
-			return false
-		}
-	}
-	return true
-}
-
 // Compact works like FloatHistogram.Compact. See there for detailed
 // explanations.
 func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram {
@@ -307,15 +308,16 @@ func (h *Histogram) ToFloat() *FloatHistogram {
 	}

 	return &FloatHistogram{
-		Schema:          h.Schema,
-		ZeroThreshold:   h.ZeroThreshold,
-		ZeroCount:       float64(h.ZeroCount),
-		Count:           float64(h.Count),
-		Sum:             h.Sum,
-		PositiveSpans:   positiveSpans,
-		NegativeSpans:   negativeSpans,
-		PositiveBuckets: positiveBuckets,
-		NegativeBuckets: negativeBuckets,
+		CounterResetHint: h.CounterResetHint,
+		Schema:           h.Schema,
+		ZeroThreshold:    h.ZeroThreshold,
+		ZeroCount:        float64(h.ZeroCount),
+		Count:            float64(h.Count),
+		Sum:              h.Sum,
+		PositiveSpans:    positiveSpans,
+		NegativeSpans:    negativeSpans,
+		PositiveBuckets:  positiveBuckets,
+		NegativeBuckets:  negativeBuckets,
 	}
 }


@@ -357,9 +357,7 @@ func EmptyLabels() Labels {
 // The caller has to guarantee that all label names are unique.
 func New(ls ...Label) Labels {
 	set := make(Labels, 0, len(ls))
-	for _, l := range ls {
-		set = append(set, l)
-	}
+	set = append(set, ls...)
 	sort.Sort(set)

 	return set
@@ -414,6 +412,49 @@ func Compare(a, b Labels) int {
 	return len(a) - len(b)
 }

+// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
+func (ls *Labels) CopyFrom(b Labels) {
+	(*ls) = append((*ls)[:0], b...)
+}
+
+// IsEmpty returns true if ls represents an empty set of labels.
+func (ls Labels) IsEmpty() bool {
+	return len(ls) == 0
+}
+
+// Range calls f on each label.
+func (ls Labels) Range(f func(l Label)) {
+	for _, l := range ls {
+		f(l)
+	}
+}
+
+// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration.
+func (ls Labels) Validate(f func(l Label) error) error {
+	for _, l := range ls {
+		if err := f(l); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// InternStrings calls intern on every string value inside ls, replacing them with what it returns.
+func (ls *Labels) InternStrings(intern func(string) string) {
+	for i, l := range *ls {
+		(*ls)[i].Name = intern(l.Name)
+		(*ls)[i].Value = intern(l.Value)
+	}
+}
+
+// ReleaseStrings calls release on every string value inside ls.
+func (ls Labels) ReleaseStrings(release func(string)) {
+	for _, l := range ls {
+		release(l.Name)
+		release(l.Value)
+	}
+}
+
 // Builder allows modifying Labels.
 type Builder struct {
 	base Labels
@@ -470,7 +511,7 @@ Outer:
 	return b
 }

-// Set the name/value pair as a label.
+// Set the name/value pair as a label. A value of "" means delete that label.
 func (b *Builder) Set(n, v string) *Builder {
 	if v == "" {
 		// Empty labels are the same as missing labels.
@@ -525,3 +566,40 @@ Outer:
 	}
 	return res
 }
+
+// ScratchBuilder allows efficient construction of a Labels from scratch.
+type ScratchBuilder struct {
+	add Labels
+}
+
+// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries.
+func NewScratchBuilder(n int) ScratchBuilder {
+	return ScratchBuilder{add: make([]Label, 0, n)}
+}
+
+func (b *ScratchBuilder) Reset() {
+	b.add = b.add[:0]
+}
+
+// Add a name/value pair.
+// Note if you Add the same name twice you will get a duplicate label, which is invalid.
+func (b *ScratchBuilder) Add(name, value string) {
+	b.add = append(b.add, Label{Name: name, Value: value})
+}
+
+// Sort the labels added so far by name.
+func (b *ScratchBuilder) Sort() {
+	sort.Sort(b.add)
+}
+
+// Asssign is for when you already have a Labels which you want this ScratchBuilder to return.
+func (b *ScratchBuilder) Assign(ls Labels) {
+	b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice.
+}
+
+// Return the name/value pairs added so far as a Labels object.
+// Note: if you want them sorted, call Sort() first.
+func (b *ScratchBuilder) Labels() Labels {
+	// Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
+	return append([]Label{}, b.add...)
+}
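ScratchBuilder is the allocation-light counterpart to Builder: Reset, Add, Sort, then Labels returns a copy so the builder can be reused. A short usage sketch (the label names and capacity hint are illustrative):

package example

import "github.com/prometheus/prometheus/model/labels"

// buildLabels shows the intended ScratchBuilder call sequence.
func buildLabels() labels.Labels {
	b := labels.NewScratchBuilder(2)
	b.Add("job", "node")
	b.Add("__name__", "up")
	b.Sort()          // Add does not keep entries ordered
	return b.Labels() // returns a copy, so b can be Reset and reused
}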


@@ -17,7 +17,6 @@ import (
 	"bufio"
 	"fmt"
 	"os"
-	"sort"
 	"strings"
 )
@@ -51,13 +50,14 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
 	defer f.Close()

 	scanner := bufio.NewScanner(f)
+	b := ScratchBuilder{}
 	var mets []Labels
 	hashes := map[uint64]struct{}{}
 	i := 0

 	for scanner.Scan() && i < n {
-		m := make(Labels, 0, 10)
+		b.Reset()

 		r := strings.NewReplacer("\"", "", "{", "", "}", "")
 		s := r.Replace(scanner.Text())
@@ -65,10 +65,11 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
 		labelChunks := strings.Split(s, ",")
 		for _, labelChunk := range labelChunks {
 			split := strings.Split(labelChunk, ":")
-			m = append(m, Label{Name: split[0], Value: split[1]})
+			b.Add(split[0], split[1])
 		}
 		// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
-		sort.Sort(m)
+		b.Sort()
+		m := b.Labels()

 		h := m.Hash()
 		if _, ok := hashes[h]; ok {


@ -203,20 +203,20 @@ func (re Regexp) String() string {
// Process returns a relabeled copy of the given label set. The relabel configurations // Process returns a relabeled copy of the given label set. The relabel configurations
// are applied in order of input. // are applied in order of input.
// If a label set is dropped, nil is returned. // If a label set is dropped, EmptyLabels and false are returned.
// May return the input labelSet modified. // May return the input labelSet modified.
func Process(lbls labels.Labels, cfgs ...*Config) labels.Labels { func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) {
lb := labels.NewBuilder(nil) lb := labels.NewBuilder(labels.EmptyLabels())
for _, cfg := range cfgs { for _, cfg := range cfgs {
lbls = relabel(lbls, cfg, lb) lbls, keep = relabel(lbls, cfg, lb)
if lbls == nil { if !keep {
return nil return labels.EmptyLabels(), false
} }
} }
return lbls return lbls, true
} }
func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels { func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) (ret labels.Labels, keep bool) {
var va [16]string var va [16]string
values := va[:0] values := va[:0]
if len(cfg.SourceLabels) > cap(values) { if len(cfg.SourceLabels) > cap(values) {
@ -232,19 +232,19 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels
switch cfg.Action { switch cfg.Action {
case Drop: case Drop:
if cfg.Regex.MatchString(val) { if cfg.Regex.MatchString(val) {
return nil return labels.EmptyLabels(), false
} }
case Keep: case Keep:
if !cfg.Regex.MatchString(val) { if !cfg.Regex.MatchString(val) {
return nil return labels.EmptyLabels(), false
} }
case DropEqual: case DropEqual:
if lset.Get(cfg.TargetLabel) == val { if lset.Get(cfg.TargetLabel) == val {
return nil return labels.EmptyLabels(), false
} }
case KeepEqual: case KeepEqual:
if lset.Get(cfg.TargetLabel) != val { if lset.Get(cfg.TargetLabel) != val {
return nil return labels.EmptyLabels(), false
} }
case Replace: case Replace:
indexes := cfg.Regex.FindStringSubmatchIndex(val) indexes := cfg.Regex.FindStringSubmatchIndex(val)
@ -271,29 +271,29 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels
mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus
lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod))
case LabelMap: case LabelMap:
for _, l := range lset { lset.Range(func(l labels.Label) {
if cfg.Regex.MatchString(l.Name) { if cfg.Regex.MatchString(l.Name) {
res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement) res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement)
lb.Set(res, l.Value) lb.Set(res, l.Value)
} }
} })
case LabelDrop: case LabelDrop:
for _, l := range lset { lset.Range(func(l labels.Label) {
if cfg.Regex.MatchString(l.Name) { if cfg.Regex.MatchString(l.Name) {
lb.Del(l.Name) lb.Del(l.Name)
} }
} })
case LabelKeep: case LabelKeep:
for _, l := range lset { lset.Range(func(l labels.Label) {
if !cfg.Regex.MatchString(l.Name) { if !cfg.Regex.MatchString(l.Name) {
lb.Del(l.Name) lb.Del(l.Name)
} }
} })
default: default:
panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action)) panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action))
} }
return lb.Labels(lset) return lb.Labels(lset), true
} }
// sum64 sums the md5 hash to an uint64. // sum64 sums the md5 hash to an uint64.
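
Call sites change accordingly: instead of comparing the result against nil, callers now branch on the boolean. A hedged sketch of the new pattern (rule construction elided; applyRelabel is a hypothetical helper, not part of the diff):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

// applyRelabel is a hypothetical wrapper showing the two-value contract.
func applyRelabel(lbls labels.Labels, cfgs []*relabel.Config) (labels.Labels, bool) {
	out, keep := relabel.Process(lbls, cfgs...)
	if !keep {
		// Dropped: Process now returns labels.EmptyLabels() and false
		// instead of a nil Labels value.
		return labels.EmptyLabels(), false
	}
	return out, true
}

func main() {
	lbls := labels.FromStrings("__name__", "up", "job", "node")
	out, keep := applyRelabel(lbls, nil) // No rules: the set passes through.
	fmt.Println(out, keep)
}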

View file

@ -22,7 +22,6 @@ import (
"fmt" "fmt"
"io" "io"
"math" "math"
"sort"
"strings" "strings"
"unicode/utf8" "unicode/utf8"
@ -82,6 +81,7 @@ func (l *openMetricsLexer) Error(es string) {
// This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit // This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit
type OpenMetricsParser struct { type OpenMetricsParser struct {
l *openMetricsLexer l *openMetricsLexer
builder labels.ScratchBuilder
series []byte series []byte
text []byte text []byte
mtype MetricType mtype MetricType
@ -113,8 +113,8 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
return p.series, nil, p.val return p.series, nil, p.val
} }
// Histogram always returns (nil, nil, nil, nil) because OpenMetrics does not support // Histogram returns (nil, nil, nil, nil) for now because OpenMetrics does not
// sparse histograms. // support sparse histograms yet.
func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
return nil, nil, nil, nil return nil, nil, nil, nil
} }
@ -158,14 +158,11 @@ func (p *OpenMetricsParser) Comment() []byte {
// Metric writes the labels of the current sample into the passed labels. // Metric writes the labels of the current sample into the passed labels.
// It returns the string from which the metric was parsed. // It returns the string from which the metric was parsed.
func (p *OpenMetricsParser) Metric(l *labels.Labels) string { func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
// Allocate the full immutable string immediately, so we just // Copy the buffer to a string: this is only necessary for the return value.
// have to create references on it below.
s := string(p.series) s := string(p.series)
*l = append(*l, labels.Label{ p.builder.Reset()
Name: labels.MetricName, p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start])
Value: s[:p.offsets[0]-p.start],
})
for i := 1; i < len(p.offsets); i += 4 { for i := 1; i < len(p.offsets); i += 4 {
a := p.offsets[i] - p.start a := p.offsets[i] - p.start
@ -173,16 +170,16 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
c := p.offsets[i+2] - p.start c := p.offsets[i+2] - p.start
d := p.offsets[i+3] - p.start d := p.offsets[i+3] - p.start
value := s[c:d]
// Replacer causes allocations. Replace only when necessary. // Replacer causes allocations. Replace only when necessary.
if strings.IndexByte(s[c:d], byte('\\')) >= 0 { if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) value = lvalReplacer.Replace(value)
continue
} }
*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) p.builder.Add(s[a:b], value)
} }
// Sort labels. p.builder.Sort()
sort.Sort(*l) *l = p.builder.Labels()
return s return s
} }
@ -204,17 +201,18 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
e.Ts = p.exemplarTs e.Ts = p.exemplarTs
} }
p.builder.Reset()
for i := 0; i < len(p.eOffsets); i += 4 { for i := 0; i < len(p.eOffsets); i += 4 {
a := p.eOffsets[i] - p.start a := p.eOffsets[i] - p.start
b := p.eOffsets[i+1] - p.start b := p.eOffsets[i+1] - p.start
c := p.eOffsets[i+2] - p.start c := p.eOffsets[i+2] - p.start
d := p.eOffsets[i+3] - p.start d := p.eOffsets[i+3] - p.start
e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]}) p.builder.Add(s[a:b], s[c:d])
} }
// Sort the labels. p.builder.Sort()
sort.Sort(e.Labels) e.Labels = p.builder.Labels()
return true return true
} }
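
The calling convention for Metric is unchanged, but the passed Labels is now overwritten wholesale rather than appended to, since the parser assembles the set in its internal ScratchBuilder. A compile-oriented sketch (any textparse.Parser works here):

package parserdemo

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

// printSeries shows the call pattern: lset is replaced, not extended,
// so it can be declared fresh (or reused) for every sample.
func printSeries(p textparse.Parser) {
	var lset labels.Labels
	mets := p.Metric(&lset)
	fmt.Println(mets, lset)
}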

View file

@ -21,7 +21,6 @@ import (
"fmt" "fmt"
"io" "io"
"math" "math"
"sort"
"strconv" "strconv"
"strings" "strings"
"unicode/utf8" "unicode/utf8"
@ -144,6 +143,7 @@ func (l *promlexer) Error(es string) {
// Prometheus text exposition format. // Prometheus text exposition format.
type PromParser struct { type PromParser struct {
l *promlexer l *promlexer
builder labels.ScratchBuilder
series []byte series []byte
text []byte text []byte
mtype MetricType mtype MetricType
@ -168,8 +168,8 @@ func (p *PromParser) Series() ([]byte, *int64, float64) {
return p.series, nil, p.val return p.series, nil, p.val
} }
// Histogram always returns (nil, nil, nil, nil) because the Prometheus text format // Histogram returns (nil, nil, nil, nil) for now because the Prometheus text
// does not support sparse histograms. // format does not support sparse histograms yet.
func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
return nil, nil, nil, nil return nil, nil, nil, nil
} }
@ -212,14 +212,11 @@ func (p *PromParser) Comment() []byte {
// Metric writes the labels of the current sample into the passed labels. // Metric writes the labels of the current sample into the passed labels.
// It returns the string from which the metric was parsed. // It returns the string from which the metric was parsed.
func (p *PromParser) Metric(l *labels.Labels) string { func (p *PromParser) Metric(l *labels.Labels) string {
// Allocate the full immutable string immediately, so we just // Copy the buffer to a string: this is only necessary for the return value.
// have to create references on it below.
s := string(p.series) s := string(p.series)
*l = append(*l, labels.Label{ p.builder.Reset()
Name: labels.MetricName, p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start])
Value: s[:p.offsets[0]-p.start],
})
for i := 1; i < len(p.offsets); i += 4 { for i := 1; i < len(p.offsets); i += 4 {
a := p.offsets[i] - p.start a := p.offsets[i] - p.start
@ -227,16 +224,16 @@ func (p *PromParser) Metric(l *labels.Labels) string {
c := p.offsets[i+2] - p.start c := p.offsets[i+2] - p.start
d := p.offsets[i+3] - p.start d := p.offsets[i+3] - p.start
value := s[c:d]
// Replacer causes allocations. Replace only when necessary. // Replacer causes allocations. Replace only when necessary.
if strings.IndexByte(s[c:d], byte('\\')) >= 0 { if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) value = lvalReplacer.Replace(value)
continue
} }
*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) p.builder.Add(s[a:b], value)
} }
// Sort labels to maintain the sorted labels invariant. p.builder.Sort()
sort.Sort(*l) *l = p.builder.Labels()
return s return s
} }
@ -343,7 +340,7 @@ func (p *PromParser) Next() (Entry, error) {
t2 = p.nextToken() t2 = p.nextToken()
} }
if t2 != tValue { if t2 != tValue {
return EntryInvalid, parseError("expected value after metric", t) return EntryInvalid, parseError("expected value after metric", t2)
} }
if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil {
return EntryInvalid, err return EntryInvalid, err
@ -353,7 +350,7 @@ func (p *PromParser) Next() (Entry, error) {
p.val = math.Float64frombits(value.NormalNaN) p.val = math.Float64frombits(value.NormalNaN)
} }
p.hasTS = false p.hasTS = false
switch p.nextToken() { switch t := p.nextToken(); t {
case tLinebreak: case tLinebreak:
break break
case tTimestamp: case tTimestamp:
@ -362,7 +359,7 @@ func (p *PromParser) Next() (Entry, error) {
return EntryInvalid, err return EntryInvalid, err
} }
if t2 := p.nextToken(); t2 != tLinebreak { if t2 := p.nextToken(); t2 != tLinebreak {
return EntryInvalid, parseError("expected next entry after timestamp", t) return EntryInvalid, parseError("expected next entry after timestamp", t2)
} }
default: default:
return EntryInvalid, parseError("expected timestamp or new record", t) return EntryInvalid, parseError("expected timestamp or new record", t)

View file

@ -19,7 +19,6 @@ import (
"fmt" "fmt"
"io" "io"
"math" "math"
"sort"
"strings" "strings"
"unicode/utf8" "unicode/utf8"
@ -59,6 +58,8 @@ type ProtobufParser struct {
// that we have to decode the next MetricFamily. // that we have to decode the next MetricFamily.
state Entry state Entry
builder labels.ScratchBuilder // held here to reduce allocations when building Labels
mf *dto.MetricFamily mf *dto.MetricFamily
// The following are just shenanigans to satisfy the Parser interface. // The following are just shenanigans to satisfy the Parser interface.
@ -104,7 +105,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
default: default:
v = s.GetQuantile()[p.fieldPos].GetValue() v = s.GetQuantile()[p.fieldPos].GetValue()
} }
case dto.MetricType_HISTOGRAM: case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
// This should only happen for a legacy histogram. // This should only happen for a legacy histogram.
h := m.GetHistogram() h := m.GetHistogram()
switch p.fieldPos { switch p.fieldPos {
@ -169,6 +170,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his
fh.NegativeSpans[i].Offset = span.GetOffset() fh.NegativeSpans[i].Offset = span.GetOffset()
fh.NegativeSpans[i].Length = span.GetLength() fh.NegativeSpans[i].Length = span.GetLength()
} }
if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
fh.CounterResetHint = histogram.GaugeType
}
fh.Compact(0) fh.Compact(0)
if ts != 0 { if ts != 0 {
return p.metricBytes.Bytes(), &ts, nil, &fh return p.metricBytes.Bytes(), &ts, nil, &fh
@ -198,6 +202,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his
sh.NegativeSpans[i].Offset = span.GetOffset() sh.NegativeSpans[i].Offset = span.GetOffset()
sh.NegativeSpans[i].Length = span.GetLength() sh.NegativeSpans[i].Length = span.GetLength()
} }
if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
sh.CounterResetHint = histogram.GaugeType
}
sh.Compact(0) sh.Compact(0)
if ts != 0 { if ts != 0 {
return p.metricBytes.Bytes(), &ts, &sh, nil return p.metricBytes.Bytes(), &ts, &sh, nil
@ -224,6 +231,8 @@ func (p *ProtobufParser) Type() ([]byte, MetricType) {
return n, MetricTypeGauge return n, MetricTypeGauge
case dto.MetricType_HISTOGRAM: case dto.MetricType_HISTOGRAM:
return n, MetricTypeHistogram return n, MetricTypeHistogram
case dto.MetricType_GAUGE_HISTOGRAM:
return n, MetricTypeGaugeHistogram
case dto.MetricType_SUMMARY: case dto.MetricType_SUMMARY:
return n, MetricTypeSummary return n, MetricTypeSummary
} }
@ -245,23 +254,19 @@ func (p *ProtobufParser) Comment() []byte {
// Metric writes the labels of the current sample into the passed labels. // Metric writes the labels of the current sample into the passed labels.
// It returns the string from which the metric was parsed. // It returns the string from which the metric was parsed.
func (p *ProtobufParser) Metric(l *labels.Labels) string { func (p *ProtobufParser) Metric(l *labels.Labels) string {
*l = append(*l, labels.Label{ p.builder.Reset()
Name: labels.MetricName, p.builder.Add(labels.MetricName, p.getMagicName())
Value: p.getMagicName(),
})
for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() { for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
*l = append(*l, labels.Label{ p.builder.Add(lp.GetName(), lp.GetValue())
Name: lp.GetName(),
Value: lp.GetValue(),
})
} }
if needed, name, value := p.getMagicLabel(); needed { if needed, name, value := p.getMagicLabel(); needed {
*l = append(*l, labels.Label{Name: name, Value: value}) p.builder.Add(name, value)
} }
// Sort labels to maintain the sorted labels invariant. // Sort labels to maintain the sorted labels invariant.
sort.Sort(*l) p.builder.Sort()
*l = p.builder.Labels()
return p.metricBytes.String() return p.metricBytes.String()
} }
@ -276,7 +281,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
switch p.mf.GetType() { switch p.mf.GetType() {
case dto.MetricType_COUNTER: case dto.MetricType_COUNTER:
exProto = m.GetCounter().GetExemplar() exProto = m.GetCounter().GetExemplar()
case dto.MetricType_HISTOGRAM: case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
bb := m.GetHistogram().GetBucket() bb := m.GetHistogram().GetBucket()
if p.fieldPos < 0 { if p.fieldPos < 0 {
if p.state == EntrySeries { if p.state == EntrySeries {
@ -305,12 +310,12 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
ex.HasTs = true ex.HasTs = true
ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000) ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000)
} }
p.builder.Reset()
for _, lp := range exProto.GetLabel() { for _, lp := range exProto.GetLabel() {
ex.Labels = append(ex.Labels, labels.Label{ p.builder.Add(lp.GetName(), lp.GetValue())
Name: lp.GetName(),
Value: lp.GetValue(),
})
} }
p.builder.Sort()
ex.Labels = p.builder.Labels()
return true return true
} }
@ -334,7 +339,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
} }
// We are at the beginning of a metric family. Put only the name // We are at the beginning of a metric family. Put only the name
// into metricBytes and validate only name and help for now. // into metricBytes and validate only name, help, and type for now.
name := p.mf.GetName() name := p.mf.GetName()
if !model.IsValidMetricName(model.LabelValue(name)) { if !model.IsValidMetricName(model.LabelValue(name)) {
return EntryInvalid, errors.Errorf("invalid metric name: %s", name) return EntryInvalid, errors.Errorf("invalid metric name: %s", name)
@ -342,6 +347,17 @@ func (p *ProtobufParser) Next() (Entry, error) {
if help := p.mf.GetHelp(); !utf8.ValidString(help) { if help := p.mf.GetHelp(); !utf8.ValidString(help) {
return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help) return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help)
} }
switch p.mf.GetType() {
case dto.MetricType_COUNTER,
dto.MetricType_GAUGE,
dto.MetricType_HISTOGRAM,
dto.MetricType_GAUGE_HISTOGRAM,
dto.MetricType_SUMMARY,
dto.MetricType_UNTYPED:
// All good.
default:
return EntryInvalid, errors.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType())
}
p.metricBytes.Reset() p.metricBytes.Reset()
p.metricBytes.WriteString(name) p.metricBytes.WriteString(name)
@ -349,7 +365,8 @@ func (p *ProtobufParser) Next() (Entry, error) {
case EntryHelp: case EntryHelp:
p.state = EntryType p.state = EntryType
case EntryType: case EntryType:
if p.mf.GetType() == dto.MetricType_HISTOGRAM && t := p.mf.GetType()
if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
p.state = EntryHistogram p.state = EntryHistogram
} else { } else {
@ -359,8 +376,11 @@ func (p *ProtobufParser) Next() (Entry, error) {
return EntryInvalid, err return EntryInvalid, err
} }
case EntryHistogram, EntrySeries: case EntryHistogram, EntrySeries:
t := p.mf.GetType()
if p.state == EntrySeries && !p.fieldsDone && if p.state == EntrySeries && !p.fieldsDone &&
(p.mf.GetType() == dto.MetricType_SUMMARY || p.mf.GetType() == dto.MetricType_HISTOGRAM) { (t == dto.MetricType_SUMMARY ||
t == dto.MetricType_HISTOGRAM ||
t == dto.MetricType_GAUGE_HISTOGRAM) {
p.fieldPos++ p.fieldPos++
} else { } else {
p.metricPos++ p.metricPos++
@ -421,7 +441,7 @@ func (p *ProtobufParser) getMagicName() string {
if p.fieldPos == -1 { if p.fieldPos == -1 {
return p.mf.GetName() + "_sum" return p.mf.GetName() + "_sum"
} }
if t == dto.MetricType_HISTOGRAM { if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
return p.mf.GetName() + "_bucket" return p.mf.GetName() + "_bucket"
} }
return p.mf.GetName() return p.mf.GetName()
@ -439,7 +459,7 @@ func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
q := qq[p.fieldPos] q := qq[p.fieldPos]
p.fieldsDone = p.fieldPos == len(qq)-1 p.fieldsDone = p.fieldPos == len(qq)-1
return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile()) return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile())
case dto.MetricType_HISTOGRAM: case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket() bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket()
if p.fieldPos >= len(bb) { if p.fieldPos >= len(bb) {
p.fieldsDone = true p.fieldsDone = true
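
The effect of the new GAUGE_HISTOGRAM branches is to tag every decoded native histogram so that downstream consumers skip counter-reset detection. A small sketch of what the hint means, assuming only the histogram package fields visible in this diff:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	fh := &histogram.FloatHistogram{
		Count: 10,
		Sum:   3.2,
		// GaugeType marks a histogram that may shrink as well as grow,
		// so counter-reset heuristics must not be applied to it.
		CounterResetHint: histogram.GaugeType,
	}
	fmt.Println(fh.CounterResetHint == histogram.GaugeType) // true
}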

View file

@ -313,6 +313,18 @@ func (m *Manager) TargetsAll() map[string][]*Target {
return targets return targets
} }
// ScrapePools returns the list of all scrape pool names.
func (m *Manager) ScrapePools() []string {
m.mtxScrape.Lock()
defer m.mtxScrape.Unlock()
names := make([]string, 0, len(m.scrapePools))
for name := range m.scrapePools {
names = append(names, name)
}
return names
}
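
A trivial consumer, shown only to document the contract (mgr is a *Manager created elsewhere; the order of the returned names is unspecified because they come from a map):

package scrapedemo

import (
	"fmt"

	"github.com/prometheus/prometheus/scrape"
)

// listPools prints the configured pool names.
func listPools(mgr *scrape.Manager) {
	for _, pool := range mgr.ScrapePools() {
		fmt.Println("scrape pool:", pool)
	}
}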
// TargetsActive returns the active targets currently being scraped. // TargetsActive returns the active targets currently being scraped.
func (m *Manager) TargetsActive() map[string][]*Target { func (m *Manager) TargetsActive() map[string][]*Target {
m.mtxScrape.Lock() m.mtxScrape.Lock()

View file

@ -27,7 +27,6 @@ import (
"strconv" "strconv"
"sync" "sync"
"time" "time"
"unsafe"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
@ -268,6 +267,7 @@ type scrapeLoopOptions struct {
const maxAheadTime = 10 * time.Minute const maxAheadTime = 10 * time.Minute
// Returning an empty label set is interpreted as "drop".
type labelsMutator func(labels.Labels) labels.Labels type labelsMutator func(labels.Labels) labels.Labels
func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) { func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) {
@ -498,9 +498,9 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
} }
targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures))) targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures)))
for _, t := range targets { for _, t := range targets {
if t.Labels().Len() > 0 { if !t.Labels().IsEmpty() {
all = append(all, t) all = append(all, t)
} else if t.DiscoveredLabels().Len() > 0 { } else if !t.DiscoveredLabels().IsEmpty() {
sp.droppedTargets = append(sp.droppedTargets, t) sp.droppedTargets = append(sp.droppedTargets, t)
} }
} }
@ -634,7 +634,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
met := lset.Get(labels.MetricName) met := lset.Get(labels.MetricName)
if limits.labelLimit > 0 { if limits.labelLimit > 0 {
nbLabels := len(lset) nbLabels := lset.Len()
if nbLabels > int(limits.labelLimit) { if nbLabels > int(limits.labelLimit) {
return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit) return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit)
} }
@ -644,7 +644,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
return nil return nil
} }
for _, l := range lset { return lset.Validate(func(l labels.Label) error {
if limits.labelNameLengthLimit > 0 { if limits.labelNameLengthLimit > 0 {
nameLength := len(l.Name) nameLength := len(l.Name)
if nameLength > int(limits.labelNameLengthLimit) { if nameLength > int(limits.labelNameLengthLimit) {
@ -658,8 +658,8 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit) return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit)
} }
} }
} return nil
return nil })
} }
func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels { func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels {
@ -667,37 +667,37 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
targetLabels := target.Labels() targetLabels := target.Labels()
if honor { if honor {
for _, l := range targetLabels { targetLabels.Range(func(l labels.Label) {
if !lset.Has(l.Name) { if !lset.Has(l.Name) {
lb.Set(l.Name, l.Value) lb.Set(l.Name, l.Value)
} }
} })
} else { } else {
var conflictingExposedLabels labels.Labels var conflictingExposedLabels []labels.Label
for _, l := range targetLabels { targetLabels.Range(func(l labels.Label) {
existingValue := lset.Get(l.Name) existingValue := lset.Get(l.Name)
if existingValue != "" { if existingValue != "" {
conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue}) conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue})
} }
// It is now safe to set the target label. // It is now safe to set the target label.
lb.Set(l.Name, l.Value) lb.Set(l.Name, l.Value)
} })
if len(conflictingExposedLabels) > 0 { if len(conflictingExposedLabels) > 0 {
resolveConflictingExposedLabels(lb, lset, targetLabels, conflictingExposedLabels) resolveConflictingExposedLabels(lb, lset, targetLabels, conflictingExposedLabels)
} }
} }
res := lb.Labels(nil) res := lb.Labels(labels.EmptyLabels())
if len(rc) > 0 { if len(rc) > 0 {
res = relabel.Process(res, rc...) res, _ = relabel.Process(res, rc...)
} }
return res return res
} }
func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLabels, conflictingExposedLabels labels.Labels) { func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLabels labels.Labels, conflictingExposedLabels []labels.Label) {
sort.SliceStable(conflictingExposedLabels, func(i, j int) bool { sort.SliceStable(conflictingExposedLabels, func(i, j int) bool {
return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name) return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name)
}) })
@ -708,7 +708,7 @@ func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLa
newName = model.ExportedLabelPrefix + newName newName = model.ExportedLabelPrefix + newName
if !exposedLabels.Has(newName) && if !exposedLabels.Has(newName) &&
!targetLabels.Has(newName) && !targetLabels.Has(newName) &&
!conflictingExposedLabels[:i].Has(newName) { !labelSliceHas(conflictingExposedLabels[:i], newName) {
conflictingExposedLabels[i].Name = newName conflictingExposedLabels[i].Name = newName
break break
} }
@ -720,15 +720,24 @@ func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLa
} }
} }
func labelSliceHas(lbls []labels.Label, name string) bool {
for _, l := range lbls {
if l.Name == name {
return true
}
}
return false
}
func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels { func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels {
lb := labels.NewBuilder(lset) lb := labels.NewBuilder(lset)
for _, l := range target.Labels() { target.Labels().Range(func(l labels.Label) {
lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name)) lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name))
lb.Set(l.Name, l.Value) lb.Set(l.Name, l.Value)
} })
return lb.Labels(nil) return lb.Labels(labels.EmptyLabels())
} }
// appender returns an appender for ingested samples from the target. // appender returns an appender for ingested samples from the target.
@ -907,8 +916,7 @@ type scrapeCache struct {
series map[string]*cacheEntry series map[string]*cacheEntry
// Cache of dropped metric strings and their iteration. The iteration must // Cache of dropped metric strings and their iteration. The iteration must
// be a pointer so we can update it without setting a new entry with an unsafe // be a pointer so we can update it.
// string in addDropped().
droppedSeries map[string]*uint64 droppedSeries map[string]*uint64
// seriesCur and seriesPrev store the labels of series that were seen // seriesCur and seriesPrev store the labels of series that were seen
@ -996,8 +1004,8 @@ func (c *scrapeCache) iterDone(flushCache bool) {
} }
} }
func (c *scrapeCache) get(met string) (*cacheEntry, bool) { func (c *scrapeCache) get(met []byte) (*cacheEntry, bool) {
e, ok := c.series[met] e, ok := c.series[string(met)]
if !ok { if !ok {
return nil, false return nil, false
} }
@ -1005,20 +1013,20 @@ func (c *scrapeCache) get(met string) (*cacheEntry, bool) {
return e, true return e, true
} }
func (c *scrapeCache) addRef(met string, ref storage.SeriesRef, lset labels.Labels, hash uint64) { func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) {
if ref == 0 { if ref == 0 {
return return
} }
c.series[met] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash} c.series[string(met)] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
} }
func (c *scrapeCache) addDropped(met string) { func (c *scrapeCache) addDropped(met []byte) {
iter := c.iter iter := c.iter
c.droppedSeries[met] = &iter c.droppedSeries[string(met)] = &iter
} }
func (c *scrapeCache) getDropped(met string) bool { func (c *scrapeCache) getDropped(met []byte) bool {
iterp, ok := c.droppedSeries[met] iterp, ok := c.droppedSeries[string(met)]
if ok { if ok {
*iterp = c.iter *iterp = c.iter
} }
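
The switch from string keys (and the unsafe yoloString helper, removed further down) to []byte arguments works because Go's compiler recognizes map index expressions of the form m[string(b)] and performs the lookup without allocating; only inserting a new key copies the bytes. A standalone sketch of the pattern:

package main

import "fmt"

func main() {
	cache := map[string]int{}
	met := []byte("up\xff")

	// Lookup: the string(met) conversion inside the index expression is
	// optimized by the compiler and does not allocate.
	if v, ok := cache[string(met)]; ok {
		fmt.Println("hit:", v)
	}

	// Insert: here the conversion does allocate, which is required, since
	// the map must own a stable copy of its key.
	cache[string(met)] = 1
	fmt.Println(len(cache))
}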
@ -1042,7 +1050,7 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) { func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) {
c.metaMtx.Lock() c.metaMtx.Lock()
e, ok := c.metadata[yoloString(metric)] e, ok := c.metadata[string(metric)]
if !ok { if !ok {
e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
c.metadata[string(metric)] = e c.metadata[string(metric)] = e
@ -1059,12 +1067,12 @@ func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) {
func (c *scrapeCache) setHelp(metric, help []byte) { func (c *scrapeCache) setHelp(metric, help []byte) {
c.metaMtx.Lock() c.metaMtx.Lock()
e, ok := c.metadata[yoloString(metric)] e, ok := c.metadata[string(metric)]
if !ok { if !ok {
e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
c.metadata[string(metric)] = e c.metadata[string(metric)] = e
} }
if e.Help != yoloString(help) { if e.Help != string(help) {
e.Help = string(help) e.Help = string(help)
e.lastIterChange = c.iter e.lastIterChange = c.iter
} }
@ -1076,12 +1084,12 @@ func (c *scrapeCache) setHelp(metric, help []byte) {
func (c *scrapeCache) setUnit(metric, unit []byte) { func (c *scrapeCache) setUnit(metric, unit []byte) {
c.metaMtx.Lock() c.metaMtx.Lock()
e, ok := c.metadata[yoloString(metric)] e, ok := c.metadata[string(metric)]
if !ok { if !ok {
e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
c.metadata[string(metric)] = e c.metadata[string(metric)] = e
} }
if e.Unit != yoloString(unit) { if e.Unit != string(unit) {
e.Unit = string(unit) e.Unit = string(unit)
e.lastIterChange = c.iter e.lastIterChange = c.iter
} }
@ -1499,7 +1507,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
sl.cache.metaMtx.Lock() sl.cache.metaMtx.Lock()
defer sl.cache.metaMtx.Unlock() defer sl.cache.metaMtx.Unlock()
metaEntry, metaOk := sl.cache.metadata[yoloString([]byte(lset.Get(labels.MetricName)))] metaEntry, metaOk := sl.cache.metadata[lset.Get(labels.MetricName)]
if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) { if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) {
metadataChanged = true metadataChanged = true
meta.Type = metaEntry.Type meta.Type = metaEntry.Type
@ -1531,9 +1539,10 @@ loop:
parsedTimestamp *int64 parsedTimestamp *int64
val float64 val float64
h *histogram.Histogram h *histogram.Histogram
fh *histogram.FloatHistogram
) )
if et, err = p.Next(); err != nil { if et, err = p.Next(); err != nil {
if err == io.EOF { if errors.Is(err, io.EOF) {
err = nil err = nil
} }
break break
@ -1558,8 +1567,7 @@ loop:
t := defTime t := defTime
if isHistogram { if isHistogram {
met, parsedTimestamp, h, _ = p.Histogram() met, parsedTimestamp, h, fh = p.Histogram()
// TODO: ingest float histograms in tsdb.
} else { } else {
met, parsedTimestamp, val = p.Series() met, parsedTimestamp, val = p.Series()
} }
@ -1574,14 +1582,13 @@ loop:
meta = metadata.Metadata{} meta = metadata.Metadata{}
metadataChanged = false metadataChanged = false
if sl.cache.getDropped(yoloString(met)) { if sl.cache.getDropped(met) {
continue continue
} }
ce, ok := sl.cache.get(yoloString(met)) ce, ok := sl.cache.get(met)
var ( var (
ref storage.SeriesRef ref storage.SeriesRef
lset labels.Labels lset labels.Labels
mets string
hash uint64 hash uint64
) )
@ -1592,16 +1599,16 @@ loop:
// Update metadata only if it changed in the current iteration. // Update metadata only if it changed in the current iteration.
updateMetadata(lset, false) updateMetadata(lset, false)
} else { } else {
mets = p.Metric(&lset) p.Metric(&lset)
hash = lset.Hash() hash = lset.Hash()
// Hash label set as it is seen local to the target. Then add target labels // Hash label set as it is seen local to the target. Then add target labels
// and relabeling and store the final label set. // and relabeling and store the final label set.
lset = sl.sampleMutator(lset) lset = sl.sampleMutator(lset)
// The label set may be set to nil to indicate dropping. // The label set may be set to empty to indicate dropping.
if lset == nil { if lset.IsEmpty() {
sl.cache.addDropped(mets) sl.cache.addDropped(met)
continue continue
} }
@ -1626,7 +1633,9 @@ loop:
if isHistogram { if isHistogram {
if h != nil { if h != nil {
ref, err = app.AppendHistogram(ref, lset, t, h) ref, err = app.AppendHistogram(ref, lset, t, h, nil)
} else {
ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
} }
} else { } else {
ref, err = app.Append(ref, lset, t, val) ref, err = app.Append(ref, lset, t, val)
@ -1644,7 +1653,7 @@ loop:
// Bypass staleness logic if there is an explicit timestamp. // Bypass staleness logic if there is an explicit timestamp.
sl.cache.trackStaleness(hash, lset) sl.cache.trackStaleness(hash, lset)
} }
sl.cache.addRef(mets, ref, lset, hash) sl.cache.addRef(met, ref, lset, hash)
if sampleAdded && sampleLimitErr == nil { if sampleAdded && sampleLimitErr == nil {
seriesAdded++ seriesAdded++
} }
@ -1709,10 +1718,6 @@ loop:
return return
} }
func yoloString(b []byte) string {
return *((*string)(unsafe.Pointer(&b)))
}
// Adds samples to the appender, checking the error, and then returns the # of samples added, // Adds samples to the appender, checking the error, and then returns the # of samples added,
// whether the caller should continue to process more samples, and any sample limit errors. // whether the caller should continue to process more samples, and any sample limit errors.
func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) { func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) {
@ -1765,15 +1770,15 @@ func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appE
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions // The constants are suffixed with the invalid \xff unicode rune to avoid collisions
// with scraped metrics in the cache. // with scraped metrics in the cache.
const ( var (
scrapeHealthMetricName = "up" + "\xff" scrapeHealthMetricName = []byte("up" + "\xff")
scrapeDurationMetricName = "scrape_duration_seconds" + "\xff" scrapeDurationMetricName = []byte("scrape_duration_seconds" + "\xff")
scrapeSamplesMetricName = "scrape_samples_scraped" + "\xff" scrapeSamplesMetricName = []byte("scrape_samples_scraped" + "\xff")
samplesPostRelabelMetricName = "scrape_samples_post_metric_relabeling" + "\xff" samplesPostRelabelMetricName = []byte("scrape_samples_post_metric_relabeling" + "\xff")
scrapeSeriesAddedMetricName = "scrape_series_added" + "\xff" scrapeSeriesAddedMetricName = []byte("scrape_series_added" + "\xff")
scrapeTimeoutMetricName = "scrape_timeout_seconds" + "\xff" scrapeTimeoutMetricName = []byte("scrape_timeout_seconds" + "\xff")
scrapeSampleLimitMetricName = "scrape_sample_limit" + "\xff" scrapeSampleLimitMetricName = []byte("scrape_sample_limit" + "\xff")
scrapeBodySizeBytesMetricName = "scrape_body_size_bytes" + "\xff" scrapeBodySizeBytesMetricName = []byte("scrape_body_size_bytes" + "\xff")
) )
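
These report-series names keep their trailing \xff sentinel: it is invalid UTF-8, so the keys can never collide with a scraped metric name in the cache, and the real name is recovered by dropping the last byte (as addReportSample does below). A minimal illustration:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	key := []byte("up\xff")
	fmt.Println(utf8.Valid(key))          // false: safe as a cache-only key
	fmt.Println(string(key[:len(key)-1])) // "up": the real metric name
}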
func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) { func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
@ -1849,7 +1854,7 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er
return return
} }
func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v float64) error { func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64) error {
ce, ok := sl.cache.get(s) ce, ok := sl.cache.get(s)
var ref storage.SeriesRef var ref storage.SeriesRef
var lset labels.Labels var lset labels.Labels
@ -1857,12 +1862,10 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v
ref = ce.ref ref = ce.ref
lset = ce.lset lset = ce.lset
} else { } else {
lset = labels.Labels{ // The constants are suffixed with the invalid \xff unicode rune to avoid collisions
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions // with scraped metrics in the cache.
// with scraped metrics in the cache. // We have to drop it when building the actual metric.
// We have to drop it when building the actual metric. lset = labels.FromStrings(labels.MetricName, string(s[:len(s)-1]))
labels.Label{Name: labels.MetricName, Value: s[:len(s)-1]},
}
lset = sl.reportSampleMutator(lset) lset = sl.reportSampleMutator(lset)
} }

View file

@ -172,22 +172,20 @@ func (t *Target) offset(interval time.Duration, jitterSeed uint64) time.Duration
// Labels returns a copy of the set of all public labels of the target. // Labels returns a copy of the set of all public labels of the target.
func (t *Target) Labels() labels.Labels { func (t *Target) Labels() labels.Labels {
lset := make(labels.Labels, 0, len(t.labels)) b := labels.NewScratchBuilder(t.labels.Len())
for _, l := range t.labels { t.labels.Range(func(l labels.Label) {
if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) { if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) {
lset = append(lset, l) b.Add(l.Name, l.Value)
} }
} })
return lset return b.Labels()
} }
// DiscoveredLabels returns a copy of the target's labels before any processing. // DiscoveredLabels returns a copy of the target's labels before any processing.
func (t *Target) DiscoveredLabels() labels.Labels { func (t *Target) DiscoveredLabels() labels.Labels {
t.mtx.Lock() t.mtx.Lock()
defer t.mtx.Unlock() defer t.mtx.Unlock()
lset := make(labels.Labels, len(t.discoveredLabels)) return t.discoveredLabels.Copy()
copy(lset, t.discoveredLabels)
return lset
} }
// SetDiscoveredLabels sets new DiscoveredLabels // SetDiscoveredLabels sets new DiscoveredLabels
@ -205,9 +203,9 @@ func (t *Target) URL() *url.URL {
params[k] = make([]string, len(v)) params[k] = make([]string, len(v))
copy(params[k], v) copy(params[k], v)
} }
for _, l := range t.labels { t.labels.Range(func(l labels.Label) {
if !strings.HasPrefix(l.Name, model.ParamLabelPrefix) { if !strings.HasPrefix(l.Name, model.ParamLabelPrefix) {
continue return
} }
ks := l.Name[len(model.ParamLabelPrefix):] ks := l.Name[len(model.ParamLabelPrefix):]
@ -216,7 +214,7 @@ func (t *Target) URL() *url.URL {
} else { } else {
params[ks] = []string{l.Value} params[ks] = []string{l.Value}
} }
} })
return &url.URL{ return &url.URL{
Scheme: t.labels.Get(model.SchemeLabel), Scheme: t.labels.Get(model.SchemeLabel),
@ -374,15 +372,15 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig, noDefaultPort
} }
} }
preRelabelLabels := lb.Labels(nil) preRelabelLabels := lb.Labels(labels.EmptyLabels())
lset = relabel.Process(preRelabelLabels, cfg.RelabelConfigs...) lset, keep := relabel.Process(preRelabelLabels, cfg.RelabelConfigs...)
// Check if the target was dropped. // Check if the target was dropped.
if lset == nil { if !keep {
return nil, preRelabelLabels, nil return labels.EmptyLabels(), preRelabelLabels, nil
} }
if v := lset.Get(model.AddressLabel); v == "" { if v := lset.Get(model.AddressLabel); v == "" {
return nil, nil, errors.New("no address") return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address")
} }
lb = labels.NewBuilder(lset) lb = labels.NewBuilder(lset)
@ -413,7 +411,7 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig, noDefaultPort
case "https": case "https":
addr = addr + ":443" addr = addr + ":443"
default: default:
return nil, nil, errors.Errorf("invalid scheme: %q", cfg.Scheme) return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme)
} }
lb.Set(model.AddressLabel, addr) lb.Set(model.AddressLabel, addr)
} }
@ -434,50 +432,54 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig, noDefaultPort
} }
if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
return nil, nil, err return labels.EmptyLabels(), labels.EmptyLabels(), err
} }
interval := lset.Get(model.ScrapeIntervalLabel) interval := lset.Get(model.ScrapeIntervalLabel)
intervalDuration, err := model.ParseDuration(interval) intervalDuration, err := model.ParseDuration(interval)
if err != nil { if err != nil {
return nil, nil, errors.Errorf("error parsing scrape interval: %v", err) return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("error parsing scrape interval: %v", err)
} }
if time.Duration(intervalDuration) == 0 { if time.Duration(intervalDuration) == 0 {
return nil, nil, errors.New("scrape interval cannot be 0") return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape interval cannot be 0")
} }
timeout := lset.Get(model.ScrapeTimeoutLabel) timeout := lset.Get(model.ScrapeTimeoutLabel)
timeoutDuration, err := model.ParseDuration(timeout) timeoutDuration, err := model.ParseDuration(timeout)
if err != nil { if err != nil {
return nil, nil, errors.Errorf("error parsing scrape timeout: %v", err) return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("error parsing scrape timeout: %v", err)
} }
if time.Duration(timeoutDuration) == 0 { if time.Duration(timeoutDuration) == 0 {
return nil, nil, errors.New("scrape timeout cannot be 0") return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape timeout cannot be 0")
} }
if timeoutDuration > intervalDuration { if timeoutDuration > intervalDuration {
return nil, nil, errors.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval)
} }
// Meta labels are deleted after relabelling. Other internal labels propagate to // Meta labels are deleted after relabelling. Other internal labels propagate to
// the target which decides whether they will be part of their label set. // the target which decides whether they will be part of their label set.
for _, l := range lset { lset.Range(func(l labels.Label) {
if strings.HasPrefix(l.Name, model.MetaLabelPrefix) { if strings.HasPrefix(l.Name, model.MetaLabelPrefix) {
lb.Del(l.Name) lb.Del(l.Name)
} }
} })
// Default the instance label to the target address. // Default the instance label to the target address.
if v := lset.Get(model.InstanceLabel); v == "" { if v := lset.Get(model.InstanceLabel); v == "" {
lb.Set(model.InstanceLabel, addr) lb.Set(model.InstanceLabel, addr)
} }
res = lb.Labels(nil) res = lb.Labels(labels.EmptyLabels())
for _, l := range res { err = res.Validate(func(l labels.Label) error {
// Check label values are valid, drop the target if not. // Check label values are valid, drop the target if not.
if !model.LabelValue(l.Value).IsValid() { if !model.LabelValue(l.Value).IsValid() {
return nil, nil, errors.Errorf("invalid label value for %q: %q", l.Name, l.Value) return errors.Errorf("invalid label value for %q: %q", l.Name, l.Value)
} }
return nil
})
if err != nil {
return labels.EmptyLabels(), labels.EmptyLabels(), err
} }
return res, preRelabelLabels, nil return res, preRelabelLabels, nil
} }
@ -501,12 +503,12 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault
lset := labels.New(lbls...) lset := labels.New(lbls...)
lbls, origLabels, err := PopulateLabels(lset, cfg, noDefaultPort) lset, origLabels, err := PopulateLabels(lset, cfg, noDefaultPort)
if err != nil { if err != nil {
failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg)) failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg))
} }
if lbls != nil || origLabels != nil { if !lset.IsEmpty() || !origLabels.IsEmpty() {
targets = append(targets, NewTarget(lbls, origLabels, cfg.Params)) targets = append(targets, NewTarget(lset, origLabels, cfg.Params))
} }
} }
return targets, failures return targets, failures
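
Callers now distinguish "dropped" from "error" via empty label sets rather than nils, mirroring TargetsFromGroup above. A hedged sketch (buildTarget is a hypothetical helper; false disables the no-default-port behavior):

package scrapedemo

import (
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/scrape"
)

// buildTarget is a hypothetical helper around the new return convention.
func buildTarget(lset labels.Labels, cfg *config.ScrapeConfig) (*scrape.Target, error) {
	res, orig, err := scrape.PopulateLabels(lset, cfg, false)
	if err != nil {
		return nil, err
	}
	// A dropped target comes back with res empty but orig (the pre-relabel
	// labels) populated; error paths return two empty sets.
	if res.IsEmpty() && orig.IsEmpty() {
		return nil, nil
	}
	return scrape.NewTarget(res, orig, cfg.Params), nil
}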

View file

@ -68,9 +68,11 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool {
// PeekBack returns the nth previous element of the iterator. If there is none buffered, // PeekBack returns the nth previous element of the iterator. If there is none buffered,
// ok is false. // ok is false.
func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, h *histogram.Histogram, ok bool) { func (b *BufferedSeriesIterator) PeekBack(n int) (
t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool,
) {
s, ok := b.buf.nthLast(n) s, ok := b.buf.nthLast(n)
return s.t, s.v, s.h, ok return s.t, s.v, s.h, s.fh, ok
} }
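
Callers of PeekBack gain a fifth return value for float histograms and must now inspect both histogram pointers. A compile-oriented sketch:

package bufferdemo

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

// peekPrevious shows the widened contract: at most one of h and fh is
// non-nil, and v is only meaningful when both are nil.
func peekPrevious(buf *storage.BufferedSeriesIterator) {
	t, v, h, fh, ok := buf.PeekBack(1)
	if !ok {
		return
	}
	switch {
	case h != nil:
		fmt.Println(t, "integer histogram", h)
	case fh != nil:
		fmt.Println(t, "float histogram", fh)
	default:
		fmt.Println(t, "float sample", v)
	}
}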
// Buffer returns an iterator over the buffered data. Invalidates previously // Buffer returns an iterator over the buffered data. Invalidates previously

View file

@ -174,14 +174,14 @@ func (f *fanoutAppender) AppendExemplar(ref SeriesRef, l labels.Labels, e exempl
return ref, nil return ref, nil
} }
func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error) { func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) {
ref, err := f.primary.AppendHistogram(ref, l, t, h) ref, err := f.primary.AppendHistogram(ref, l, t, h, fh)
if err != nil { if err != nil {
return ref, err return ref, err
} }
for _, appender := range f.secondaries { for _, appender := range f.secondaries {
if _, err := appender.AppendHistogram(ref, l, t, h); err != nil { if _, err := appender.AppendHistogram(ref, l, t, h, fh); err != nil {
return 0, err return 0, err
} }
} }

View file

@ -282,7 +282,7 @@ type HistogramAppender interface {
// For efficiency reasons, the histogram is passed as a // For efficiency reasons, the histogram is passed as a
// pointer. AppendHistogram won't mutate the histogram, but in turn // pointer. AppendHistogram won't mutate the histogram, but in turn
// depends on the caller to not mutate it either. // depends on the caller to not mutate it either.
AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error)
} }
// MetadataUpdater provides an interface for associating metadata to stored series. // MetadataUpdater provides an interface for associating metadata to stored series.
@ -382,7 +382,7 @@ func (s mockSeries) Labels() labels.Labels {
return labels.FromStrings(s.labelSet...) return labels.FromStrings(s.labelSet...)
} }
func (s mockSeries) Iterator() chunkenc.Iterator { func (s mockSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator {
return chunkenc.MockSeriesIterator(s.timestamps, s.values) return chunkenc.MockSeriesIterator(s.timestamps, s.values)
} }
@ -421,14 +421,17 @@ type Labels interface {
} }
type SampleIterable interface { type SampleIterable interface {
// Iterator returns a new, independent iterator of the data of the series. // Iterator returns an iterator of the data of the series.
Iterator() chunkenc.Iterator // The iterator passed as argument is for re-use, if not nil.
// Depending on implementation, the iterator can
// be re-used or a new iterator can be allocated.
Iterator(chunkenc.Iterator) chunkenc.Iterator
} }
type ChunkIterable interface { type ChunkIterable interface {
// Iterator returns a new, independent iterator that iterates over potentially overlapping // Iterator returns an iterator that iterates over potentially overlapping
// chunks of the series, sorted by min time. // chunks of the series, sorted by min time.
Iterator() chunks.Iterator Iterator(chunks.Iterator) chunks.Iterator
} }
type Warnings []error type Warnings []error
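
The new signatures enable the hoisted-iterator idiom used throughout this commit: declare the iterator once outside the loop and pass it back in, so each series can recycle the previous allocation. A minimal sketch against these interfaces:

package iterdemo

import (
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// sumFloats drains a SeriesSet while reusing one iterator across series.
func sumFloats(ss storage.SeriesSet) float64 {
	var (
		it  chunkenc.Iterator // nil on the first call; recycled afterwards
		sum float64
	)
	for ss.Next() {
		it = ss.At().Iterator(it)
		for it.Next() == chunkenc.ValFloat {
			_, v := it.At()
			sum += v
		}
	}
	return sum
}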

View file

@ -425,12 +425,8 @@ func ChainedSeriesMerge(series ...Series) Series {
} }
return &SeriesEntry{ return &SeriesEntry{
Lset: series[0].Labels(), Lset: series[0].Labels(),
SampleIteratorFn: func() chunkenc.Iterator { SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator {
iterators := make([]chunkenc.Iterator, 0, len(series)) return ChainSampleIteratorFromSeries(it, series)
for _, s := range series {
iterators = append(iterators, s.Iterator())
}
return NewChainSampleIterator(iterators)
}, },
} }
} }
@ -444,17 +440,48 @@ type chainSampleIterator struct {
curr chunkenc.Iterator curr chunkenc.Iterator
lastT int64 lastT int64
// Whether the previous and the current sample are direct neighbors
// within the same base iterator.
consecutive bool
} }
// NewChainSampleIterator returns a single iterator that iterates over the samples from the given iterators in a sorted // Return a chainSampleIterator initialized for length entries, re-using the memory from it if possible.
// fashion. If samples overlap, one sample from overlapped ones is kept (randomly) and all others with the same func getChainSampleIterator(it chunkenc.Iterator, length int) *chainSampleIterator {
// timestamp are dropped. csi, ok := it.(*chainSampleIterator)
func NewChainSampleIterator(iterators []chunkenc.Iterator) chunkenc.Iterator { if !ok {
return &chainSampleIterator{ csi = &chainSampleIterator{}
iterators: iterators,
h: nil,
lastT: math.MinInt64,
} }
if cap(csi.iterators) < length {
csi.iterators = make([]chunkenc.Iterator, length)
} else {
csi.iterators = csi.iterators[:length]
}
csi.h = nil
csi.lastT = math.MinInt64
return csi
}
func ChainSampleIteratorFromSeries(it chunkenc.Iterator, series []Series) chunkenc.Iterator {
csi := getChainSampleIterator(it, len(series))
for i, s := range series {
csi.iterators[i] = s.Iterator(csi.iterators[i])
}
return csi
}
func ChainSampleIteratorFromMetas(it chunkenc.Iterator, chunks []chunks.Meta) chunkenc.Iterator {
csi := getChainSampleIterator(it, len(chunks))
for i, c := range chunks {
csi.iterators[i] = c.Chunk.Iterator(csi.iterators[i])
}
return csi
}
func ChainSampleIteratorFromIterators(it chunkenc.Iterator, iterators []chunkenc.Iterator) chunkenc.Iterator {
csi := getChainSampleIterator(it, 0)
csi.iterators = iterators
return csi
} }
func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType { func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType {
@ -462,6 +489,9 @@ func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType {
if c.curr != nil && c.lastT >= t { if c.curr != nil && c.lastT >= t {
return c.curr.Seek(c.lastT) return c.curr.Seek(c.lastT)
} }
// Don't bother to find out if the next sample is consecutive. Callers
// of Seek usually aren't interested anyway.
c.consecutive = false
c.h = samplesIteratorHeap{} c.h = samplesIteratorHeap{}
for _, iter := range c.iterators { for _, iter := range c.iterators {
if iter.Seek(t) != chunkenc.ValNone { if iter.Seek(t) != chunkenc.ValNone {
@ -488,14 +518,30 @@ func (c *chainSampleIterator) AtHistogram() (int64, *histogram.Histogram) {
if c.curr == nil { if c.curr == nil {
panic("chainSampleIterator.AtHistogram called before first .Next or after .Next returned false.") panic("chainSampleIterator.AtHistogram called before first .Next or after .Next returned false.")
} }
return c.curr.AtHistogram() t, h := c.curr.AtHistogram()
// If the current sample is not consecutive with the previous one, we
// cannot be sure anymore that there was no counter reset.
if !c.consecutive && h.CounterResetHint == histogram.NotCounterReset {
h.CounterResetHint = histogram.UnknownCounterReset
}
return t, h
} }
func (c *chainSampleIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { func (c *chainSampleIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
if c.curr == nil { if c.curr == nil {
panic("chainSampleIterator.AtFloatHistogram called before first .Next or after .Next returned false.") panic("chainSampleIterator.AtFloatHistogram called before first .Next or after .Next returned false.")
} }
return c.curr.AtFloatHistogram() t, fh := c.curr.AtFloatHistogram()
// If the current sample is not consecutive with the previous one, we
// cannot be sure anymore about counter resets for counter histograms.
// TODO(beorn7): If a `NotCounterReset` sample is followed by a
// non-consecutive `CounterReset` sample, we could keep the hint as
// `CounterReset`. But then we would need to track the previous sample
// in more detail, which might not be worth it.
if !c.consecutive && fh.CounterResetHint != histogram.GaugeType {
fh.CounterResetHint = histogram.UnknownCounterReset
}
return t, fh
} }
func (c *chainSampleIterator) AtT() int64 { func (c *chainSampleIterator) AtT() int64 {
@ -506,7 +552,13 @@ func (c *chainSampleIterator) AtT() int64 {
} }
func (c *chainSampleIterator) Next() chunkenc.ValueType { func (c *chainSampleIterator) Next() chunkenc.ValueType {
var (
currT int64
currValueType chunkenc.ValueType
iteratorChanged bool
)
if c.h == nil { if c.h == nil {
iteratorChanged = true
c.h = samplesIteratorHeap{} c.h = samplesIteratorHeap{}
// We call c.curr.Next() as the first thing below. // We call c.curr.Next() as the first thing below.
// So, we don't call Next() on it here. // So, we don't call Next() on it here.
@ -522,8 +574,6 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType {
return chunkenc.ValNone return chunkenc.ValNone
} }
var currT int64
var currValueType chunkenc.ValueType
for { for {
currValueType = c.curr.Next() currValueType = c.curr.Next()
if currValueType != chunkenc.ValNone { if currValueType != chunkenc.ValNone {
@ -553,6 +603,7 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType {
} }
c.curr = heap.Pop(&c.h).(chunkenc.Iterator) c.curr = heap.Pop(&c.h).(chunkenc.Iterator)
iteratorChanged = true
currT = c.curr.AtT() currT = c.curr.AtT()
currValueType = c.curr.Seek(currT) currValueType = c.curr.Seek(currT)
if currT != c.lastT { if currT != c.lastT {
@ -560,6 +611,7 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType {
} }
} }
c.consecutive = !iteratorChanged
c.lastT = currT c.lastT = currT
return currValueType return currValueType
} }
@ -607,10 +659,10 @@ func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalC
} }
return &ChunkSeriesEntry{ return &ChunkSeriesEntry{
Lset: series[0].Labels(), Lset: series[0].Labels(),
ChunkIteratorFn: func() chunks.Iterator { ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
iterators := make([]chunks.Iterator, 0, len(series)) iterators := make([]chunks.Iterator, 0, len(series))
for _, s := range series { for _, s := range series {
iterators = append(iterators, s.Iterator()) iterators = append(iterators, s.Iterator(nil))
} }
return &compactChunkIterator{ return &compactChunkIterator{
mergeFunc: mergeFunc, mergeFunc: mergeFunc,
@ -676,7 +728,7 @@ func (c *compactChunkIterator) Next() bool {
// 1:1 duplicates, skip it. // 1:1 duplicates, skip it.
} else { } else {
// We operate on the same series, so labels do not matter here. // We operate on the same series, so labels do not matter here.
overlapping = append(overlapping, newChunkToSeriesDecoder(nil, next)) overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next))
if next.MaxTime > oMaxTime { if next.MaxTime > oMaxTime {
oMaxTime = next.MaxTime oMaxTime = next.MaxTime
} }
@ -693,7 +745,7 @@ func (c *compactChunkIterator) Next() bool {
} }
// Add last as it's not yet included in overlap. We operate on the same series, so labels do not matter here. // Add last as it's not yet included in overlap. We operate on the same series, so labels do not matter here.
iter = NewSeriesToChunkEncoder(c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(nil, c.curr))...)).Iterator() iter = NewSeriesToChunkEncoder(c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), c.curr))...)).Iterator(nil)
if !iter.Next() { if !iter.Next() {
if c.err = iter.Err(); c.err != nil { if c.err = iter.Err(); c.err != nil {
return false return false
@@ -751,10 +803,10 @@ func NewConcatenatingChunkSeriesMerger() VerticalChunkSeriesMergeFunc {
} }
return &ChunkSeriesEntry{ return &ChunkSeriesEntry{
Lset: series[0].Labels(), Lset: series[0].Labels(),
ChunkIteratorFn: func() chunks.Iterator { ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
iterators := make([]chunks.Iterator, 0, len(series)) iterators := make([]chunks.Iterator, 0, len(series))
for _, s := range series { for _, s := range series {
iterators = append(iterators, s.Iterator()) iterators = append(iterators, s.Iterator(nil))
} }
return &concatenatingChunkIterator{ return &concatenatingChunkIterator{
iterators: iterators, iterators: iterators,

View file

@@ -33,6 +33,7 @@ import (
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
) )
// decodeReadLimit is the maximum size of a read request body in bytes. // decodeReadLimit is the maximum size of a read request body in bytes.
@@ -115,9 +116,10 @@ func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHi
func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, storage.Warnings, error) { func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, storage.Warnings, error) {
numSamples := 0 numSamples := 0
resp := &prompb.QueryResult{} resp := &prompb.QueryResult{}
var iter chunkenc.Iterator
for ss.Next() { for ss.Next() {
series := ss.At() series := ss.At()
iter := series.Iterator() iter = series.Iterator(iter)
samples := []prompb.Sample{} samples := []prompb.Sample{}
for iter.Next() == chunkenc.ValFloat { for iter.Next() == chunkenc.ValFloat {
@@ -151,10 +153,10 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet { func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet {
series := make([]storage.Series, 0, len(res.Timeseries)) series := make([]storage.Series, 0, len(res.Timeseries))
for _, ts := range res.Timeseries { for _, ts := range res.Timeseries {
lbls := labelProtosToLabels(ts.Labels) if err := validateLabelsAndMetricName(ts.Labels); err != nil {
if err := validateLabelsAndMetricName(lbls); err != nil {
return errSeriesSet{err: err} return errSeriesSet{err: err}
} }
lbls := labelProtosToLabels(ts.Labels)
series = append(series, &concreteSeries{labels: lbls, samples: ts.Samples}) series = append(series, &concreteSeries{labels: lbls, samples: ts.Samples})
} }
@@ -199,17 +201,19 @@ func StreamChunkedReadResponses(
var ( var (
chks []prompb.Chunk chks []prompb.Chunk
lbls []prompb.Label lbls []prompb.Label
iter chunks.Iterator
) )
for ss.Next() { for ss.Next() {
series := ss.At() series := ss.At()
iter := series.Iterator() iter = series.Iterator(iter)
lbls = MergeLabels(labelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels) lbls = MergeLabels(labelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels)
frameBytesLeft := maxBytesInFrame maxDataLength := maxBytesInFrame
for _, lbl := range lbls { for _, lbl := range lbls {
frameBytesLeft -= lbl.Size() maxDataLength -= lbl.Size()
} }
frameBytesLeft := maxDataLength
isNext := iter.Next() isNext := iter.Next()
@@ -255,6 +259,7 @@ func StreamChunkedReadResponses(
// We immediately flush the Write() so it is safe to return to the pool. // We immediately flush the Write() so it is safe to return to the pool.
marshalPool.Put(&b) marshalPool.Put(&b)
chks = chks[:0] chks = chks[:0]
frameBytesLeft = maxDataLength
} }
if err := iter.Err(); err != nil { if err := iter.Err(); err != nil {
return ss.Warnings(), err return ss.Warnings(), err
@@ -343,10 +348,14 @@ type concreteSeries struct {
} }
func (c *concreteSeries) Labels() labels.Labels { func (c *concreteSeries) Labels() labels.Labels {
return labels.New(c.labels...) return c.labels.Copy()
} }
func (c *concreteSeries) Iterator() chunkenc.Iterator { func (c *concreteSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
if csi, ok := it.(*concreteSeriesIterator); ok {
csi.reset(c)
return csi
}
return newConcreteSeriersIterator(c) return newConcreteSeriersIterator(c)
} }
@@ -363,6 +372,11 @@ func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator {
} }
} }
func (c *concreteSeriesIterator) reset(series *concreteSeries) {
c.cur = -1
c.series = series
}
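The reset method above is what makes the new Iterator(chunkenc.Iterator) signature pay off: callers thread one iterator variable through an entire SeriesSet. A minimal caller-side sketch (sumSeriesSet is hypothetical):

package example

import (
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// sumSeriesSet drains every float sample in ss while allocating at most one
// iterator: Iterator(it) hands the previous iterator back to each series,
// which resets and reuses it when the concrete type matches.
func sumSeriesSet(ss storage.SeriesSet) float64 {
	var (
		it  chunkenc.Iterator
		sum float64
	)
	for ss.Next() {
		it = ss.At().Iterator(it)
		for it.Next() == chunkenc.ValFloat {
			_, v := it.At()
			sum += v
		}
	}
	return sum
}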
// Seek implements storage.SeriesIterator. // Seek implements storage.SeriesIterator.
func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType { func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
if c.cur == -1 { if c.cur == -1 {
@@ -429,7 +443,7 @@ func (c *concreteSeriesIterator) Err() error {
// validateLabelsAndMetricName validates the label names/values and metric names returned from remote read, // validateLabelsAndMetricName validates the label names/values and metric names returned from remote read,
// also making sure that there are no labels with duplicate names // also making sure that there are no labels with duplicate names
func validateLabelsAndMetricName(ls labels.Labels) error { func validateLabelsAndMetricName(ls []prompb.Label) error {
for i, l := range ls { for i, l := range ls {
if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) { if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) {
return fmt.Errorf("invalid metric name: %v", l.Value) return fmt.Errorf("invalid metric name: %v", l.Value)
@@ -511,18 +525,37 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar {
// HistogramProtoToHistogram extracts a (normal integer) Histogram from the // HistogramProtoToHistogram extracts a (normal integer) Histogram from the
// provided proto message. The caller has to make sure that the proto message // provided proto message. The caller has to make sure that the proto message
// represents an interger histogram and not a float histogram. // represents an integer histogram and not a float histogram.
func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram { func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
return &histogram.Histogram{ return &histogram.Histogram{
Schema: hp.Schema, CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
ZeroThreshold: hp.ZeroThreshold, Schema: hp.Schema,
ZeroCount: hp.GetZeroCountInt(), ZeroThreshold: hp.ZeroThreshold,
Count: hp.GetCountInt(), ZeroCount: hp.GetZeroCountInt(),
Sum: hp.Sum, Count: hp.GetCountInt(),
PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), Sum: hp.Sum,
PositiveBuckets: hp.GetPositiveDeltas(), PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), PositiveBuckets: hp.GetPositiveDeltas(),
NegativeBuckets: hp.GetNegativeDeltas(), NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
NegativeBuckets: hp.GetNegativeDeltas(),
}
}
// HistogramProtoToFloatHistogram extracts a float histogram from the
// provided proto message. The caller has to make sure that the proto
// message represents a float histogram and not an integer histogram.
func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
Schema: hp.Schema,
ZeroThreshold: hp.ZeroThreshold,
ZeroCount: hp.GetZeroCountFloat(),
Count: hp.GetCountFloat(),
Sum: hp.Sum,
PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
PositiveBuckets: hp.GetPositiveCounts(),
NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
NegativeBuckets: hp.GetNegativeCounts(),
} }
} }
@@ -546,6 +579,23 @@ func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.H
NegativeDeltas: h.NegativeBuckets, NegativeDeltas: h.NegativeBuckets,
PositiveSpans: spansToSpansProto(h.PositiveSpans), PositiveSpans: spansToSpansProto(h.PositiveSpans),
PositiveDeltas: h.PositiveBuckets, PositiveDeltas: h.PositiveBuckets,
ResetHint: prompb.Histogram_ResetHint(h.CounterResetHint),
Timestamp: timestamp,
}
}
func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) prompb.Histogram {
return prompb.Histogram{
Count: &prompb.Histogram_CountFloat{CountFloat: fh.Count},
Sum: fh.Sum,
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
NegativeSpans: spansToSpansProto(fh.NegativeSpans),
NegativeCounts: fh.NegativeBuckets,
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
PositiveCounts: fh.PositiveBuckets,
ResetHint: prompb.Histogram_ResetHint(fh.CounterResetHint),
Timestamp: timestamp, Timestamp: timestamp,
} }
} }
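For a well-formed float histogram, the two new helpers are inverses of each other (only the timestamp lives outside the histogram). A hedged round-trip sketch; roundTrip is hypothetical and assumes both functions remain exported from storage/remote:

package example

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/storage/remote"
)

// roundTrip encodes a float histogram into its proto form and decodes it
// again; every field written by FloatHistogramToHistogramProto, including
// the new ResetHint, is read back by HistogramProtoToFloatHistogram.
func roundTrip(fh *histogram.FloatHistogram, ts int64) *histogram.FloatHistogram {
	p := remote.FloatHistogramToHistogramProto(ts, fh)
	return remote.HistogramProtoToFloatHistogram(p)
}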
@@ -569,30 +619,24 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
} }
func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels { func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels {
result := make(labels.Labels, 0, len(labelPairs)) b := labels.ScratchBuilder{}
for _, l := range labelPairs { for _, l := range labelPairs {
result = append(result, labels.Label{ b.Add(l.Name, l.Value)
Name: l.Name,
Value: l.Value,
})
} }
sort.Sort(result) b.Sort()
return result return b.Labels()
} }
// labelsToLabelsProto transforms labels into prompb labels. The buffer slice // labelsToLabelsProto transforms labels into prompb labels. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels. // will be used to avoid allocations if it is big enough to store the labels.
func labelsToLabelsProto(labels labels.Labels, buf []prompb.Label) []prompb.Label { func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
result := buf[:0] result := buf[:0]
if cap(buf) < len(labels) { lbls.Range(func(l labels.Label) {
result = make([]prompb.Label, 0, len(labels))
}
for _, l := range labels {
result = append(result, prompb.Label{ result = append(result, prompb.Label{
Name: l.Name, Name: l.Name,
Value: l.Value, Value: l.Value,
}) })
} })
return result return result
} }
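The buf contract is easier to see outside the diff: pass the previous result back in and the append recycles its backing array. toProto below is a hypothetical stand-in for the unexported helper, shown only to illustrate the calling pattern:

package example

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/prompb"
)

// toProto re-implements the conversion so the reuse is visible in one place:
// result aliases buf's backing array, so once buf has grown large enough the
// loop below stops allocating.
func toProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
	result := buf[:0]
	lbls.Range(func(l labels.Label) {
		result = append(result, prompb.Label{Name: l.Name, Value: l.Value})
	})
	return result
}

func convertAll(sets []labels.Labels) {
	var buf []prompb.Label
	for _, lbls := range sets {
		buf = toProto(lbls, buf)
		_ = buf // Consume buf before the next iteration overwrites it.
	}
}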

View file

@@ -396,7 +396,7 @@ type QueueManager struct {
flushDeadline time.Duration flushDeadline time.Duration
cfg config.QueueConfig cfg config.QueueConfig
mcfg config.MetadataConfig mcfg config.MetadataConfig
externalLabels labels.Labels externalLabels []labels.Label
relabelConfigs []*relabel.Config relabelConfigs []*relabel.Config
sendExemplars bool sendExemplars bool
sendNativeHistograms bool sendNativeHistograms bool
@@ -454,13 +454,19 @@ func NewQueueManager(
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
// Copy externalLabels into slice which we need for processExternalLabels.
extLabelsSlice := make([]labels.Label, 0, externalLabels.Len())
externalLabels.Range(func(l labels.Label) {
extLabelsSlice = append(extLabelsSlice, l)
})
logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint()) logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint())
t := &QueueManager{ t := &QueueManager{
logger: logger, logger: logger,
flushDeadline: flushDeadline, flushDeadline: flushDeadline,
cfg: cfg, cfg: cfg,
mcfg: mCfg, mcfg: mCfg,
externalLabels: externalLabels, externalLabels: extLabelsSlice,
relabelConfigs: relabelConfigs, relabelConfigs: relabelConfigs,
storeClient: client, storeClient: client,
sendExemplars: enableExemplarRemoteWrite, sendExemplars: enableExemplarRemoteWrite,
@@ -710,6 +716,53 @@ outer:
return true return true
} }
func (t *QueueManager) AppendFloatHistograms(floatHistograms []record.RefFloatHistogramSample) bool {
if !t.sendNativeHistograms {
return true
}
outer:
for _, h := range floatHistograms {
t.seriesMtx.Lock()
lbls, ok := t.seriesLabels[h.Ref]
if !ok {
t.metrics.droppedHistogramsTotal.Inc()
t.dataDropped.incr(1)
if _, ok := t.droppedSeries[h.Ref]; !ok {
level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref)
}
t.seriesMtx.Unlock()
continue
}
t.seriesMtx.Unlock()
backoff := model.Duration(5 * time.Millisecond)
for {
select {
case <-t.quit:
return false
default:
}
if t.shards.enqueue(h.Ref, timeSeries{
seriesLabels: lbls,
timestamp: h.T,
floatHistogram: h.FH,
sType: tFloatHistogram,
}) {
continue outer
}
t.metrics.enqueueRetriesTotal.Inc()
time.Sleep(time.Duration(backoff))
backoff = backoff * 2
if backoff > t.cfg.MaxBackoff {
backoff = t.cfg.MaxBackoff
}
}
}
return true
}
// Start the queue manager sending samples to the remote storage. // Start the queue manager sending samples to the remote storage.
// Does not block. // Does not block.
func (t *QueueManager) Start() { func (t *QueueManager) Start() {
@@ -769,8 +822,8 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) {
t.seriesSegmentIndexes[s.Ref] = index t.seriesSegmentIndexes[s.Ref] = index
ls := processExternalLabels(s.Labels, t.externalLabels) ls := processExternalLabels(s.Labels, t.externalLabels)
lbls := relabel.Process(ls, t.relabelConfigs...) lbls, keep := relabel.Process(ls, t.relabelConfigs...)
if len(lbls) == 0 { if !keep || lbls.IsEmpty() {
t.droppedSeries[s.Ref] = struct{}{} t.droppedSeries[s.Ref] = struct{}{}
continue continue
} }
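relabel.Process now signals a dropped series through a second return value rather than an empty label set. A minimal sketch of the new contract (relabelSeries is a hypothetical wrapper):

package example

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

// relabelSeries returns the rewritten labels and whether the series should
// be kept; both a drop rule firing and an empty result count as dropped,
// matching the check in StoreSeries above.
func relabelSeries(ls labels.Labels, cfgs []*relabel.Config) (labels.Labels, bool) {
	lbls, keep := relabel.Process(ls, cfgs...)
	if !keep || lbls.IsEmpty() {
		return labels.EmptyLabels(), false
	}
	return lbls, true
}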
@@ -831,44 +884,33 @@ func (t *QueueManager) client() WriteClient {
} }
func (t *QueueManager) internLabels(lbls labels.Labels) { func (t *QueueManager) internLabels(lbls labels.Labels) {
for i, l := range lbls { lbls.InternStrings(t.interner.intern)
lbls[i].Name = t.interner.intern(l.Name)
lbls[i].Value = t.interner.intern(l.Value)
}
} }
func (t *QueueManager) releaseLabels(ls labels.Labels) { func (t *QueueManager) releaseLabels(ls labels.Labels) {
for _, l := range ls { ls.ReleaseStrings(t.interner.release)
t.interner.release(l.Name)
t.interner.release(l.Value)
}
} }
// processExternalLabels merges externalLabels into ls. If ls contains // processExternalLabels merges externalLabels into ls. If ls contains
// a label in externalLabels, the value in ls wins. // a label in externalLabels, the value in ls wins.
func processExternalLabels(ls, externalLabels labels.Labels) labels.Labels { func processExternalLabels(ls labels.Labels, externalLabels []labels.Label) labels.Labels {
i, j, result := 0, 0, make(labels.Labels, 0, len(ls)+len(externalLabels)) b := labels.NewScratchBuilder(ls.Len() + len(externalLabels))
for i < len(ls) && j < len(externalLabels) { j := 0
if ls[i].Name < externalLabels[j].Name { ls.Range(func(l labels.Label) {
result = append(result, labels.Label{ for j < len(externalLabels) && l.Name > externalLabels[j].Name {
Name: ls[i].Name, b.Add(externalLabels[j].Name, externalLabels[j].Value)
Value: ls[i].Value,
})
i++
} else if ls[i].Name > externalLabels[j].Name {
result = append(result, externalLabels[j])
j++
} else {
result = append(result, labels.Label{
Name: ls[i].Name,
Value: ls[i].Value,
})
i++
j++ j++
} }
if j < len(externalLabels) && l.Name == externalLabels[j].Name {
j++
}
b.Add(l.Name, l.Value)
})
for ; j < len(externalLabels); j++ {
b.Add(externalLabels[j].Name, externalLabels[j].Value)
} }
return append(append(result, ls[i:]...), externalLabels[j:]...) return b.Labels()
} }
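A sketch of a table test pinning the documented merge semantics; hypothetical, and assuming in-package access to the unexported processExternalLabels:

package remote

import (
	"testing"

	"github.com/prometheus/prometheus/model/labels"
)

// The series' own region="eu" wins over the external region="us", while the
// external zone="a" is spliced in at its sorted position.
func TestProcessExternalLabelsSketch(t *testing.T) {
	ls := labels.FromStrings("job", "app", "region", "eu")
	external := []labels.Label{{Name: "region", Value: "us"}, {Name: "zone", Value: "a"}}

	got := processExternalLabels(ls, external)
	want := labels.FromStrings("job", "app", "region", "eu", "zone", "a")
	if !labels.Equal(got, want) {
		t.Fatalf("got %s, want %s", got, want)
	}
}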
func (t *QueueManager) updateShardsLoop() { func (t *QueueManager) updateShardsLoop() {
@@ -1134,7 +1176,7 @@ func (s *shards) enqueue(ref chunks.HeadSeriesRef, data timeSeries) bool {
case tExemplar: case tExemplar:
s.qm.metrics.pendingExemplars.Inc() s.qm.metrics.pendingExemplars.Inc()
s.enqueuedExemplars.Inc() s.enqueuedExemplars.Inc()
case tHistogram: case tHistogram, tFloatHistogram:
s.qm.metrics.pendingHistograms.Inc() s.qm.metrics.pendingHistograms.Inc()
s.enqueuedHistograms.Inc() s.enqueuedHistograms.Inc()
} }
@@ -1159,6 +1201,7 @@ type timeSeries struct {
seriesLabels labels.Labels seriesLabels labels.Labels
value float64 value float64
histogram *histogram.Histogram histogram *histogram.Histogram
floatHistogram *histogram.FloatHistogram
timestamp int64 timestamp int64
exemplarLabels labels.Labels exemplarLabels labels.Labels
// The type of series: sample, exemplar, or histogram. // The type of series: sample, exemplar, or histogram.
@@ -1171,6 +1214,7 @@ const (
tSample seriesType = iota tSample seriesType = iota
tExemplar tExemplar
tHistogram tHistogram
tFloatHistogram
) )
func newQueue(batchSize, capacity int) *queue { func newQueue(batchSize, capacity int) *queue {
@@ -1358,7 +1402,8 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
if len(batch) > 0 { if len(batch) > 0 {
nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData) nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
n := nPendingSamples + nPendingExemplars + nPendingHistograms n := nPendingSamples + nPendingExemplars + nPendingHistograms
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum) level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf) s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
} }
queue.ReturnForReuse(batch) queue.ReturnForReuse(batch)
@@ -1399,6 +1444,9 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim
case tHistogram: case tHistogram:
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram)) pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
nPendingHistograms++ nPendingHistograms++
case tFloatHistogram:
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
nPendingHistograms++
} }
} }
return nPendingSamples, nPendingExemplars, nPendingHistograms return nPendingSamples, nPendingExemplars, nPendingHistograms

View file

@@ -180,9 +180,11 @@ func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers .
// We return the new set of matchers, along with a map of labels for which // We return the new set of matchers, along with a map of labels for which
// matchers were added, so that these can later be removed from the result // matchers were added, so that these can later be removed from the result
// time series again. // time series again.
func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, labels.Labels) { func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []string) {
el := make(labels.Labels, len(q.externalLabels)) el := make([]labels.Label, 0, q.externalLabels.Len())
copy(el, q.externalLabels) q.externalLabels.Range(func(l labels.Label) {
el = append(el, l)
})
// ms won't be sorted, so have to O(n^2) the search. // ms won't be sorted, so have to O(n^2) the search.
for _, m := range ms { for _, m := range ms {
@@ -202,7 +204,11 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, lab
} }
ms = append(ms, m) ms = append(ms, m)
} }
return ms, el names := make([]string, len(el))
for i := range el {
names[i] = el[i].Name
}
return ms, names
} }
// LabelValues implements storage.Querier and is a noop. // LabelValues implements storage.Querier and is a noop.
@@ -234,7 +240,8 @@ func (q *chunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, match
return storage.NewSeriesSetToChunkSet(q.querier.Select(sortSeries, hints, matchers...)) return storage.NewSeriesSetToChunkSet(q.querier.Select(sortSeries, hints, matchers...))
} }
func newSeriesSetFilter(ss storage.SeriesSet, toFilter labels.Labels) storage.SeriesSet { // Note strings in toFilter must be sorted.
func newSeriesSetFilter(ss storage.SeriesSet, toFilter []string) storage.SeriesSet {
return &seriesSetFilter{ return &seriesSetFilter{
SeriesSet: ss, SeriesSet: ss,
toFilter: toFilter, toFilter: toFilter,
@ -243,7 +250,7 @@ func newSeriesSetFilter(ss storage.SeriesSet, toFilter labels.Labels) storage.Se
type seriesSetFilter struct { type seriesSetFilter struct {
storage.SeriesSet storage.SeriesSet
toFilter labels.Labels toFilter []string // Label names to remove from result
querier storage.Querier querier storage.Querier
} }
@ -264,20 +271,12 @@ func (ssf seriesSetFilter) At() storage.Series {
type seriesFilter struct { type seriesFilter struct {
storage.Series storage.Series
toFilter labels.Labels toFilter []string // Label names to remove from result
} }
func (sf seriesFilter) Labels() labels.Labels { func (sf seriesFilter) Labels() labels.Labels {
labels := sf.Series.Labels() b := labels.NewBuilder(sf.Series.Labels())
for i, j := 0, 0; i < len(labels) && j < len(sf.toFilter); { // todo: check if this is too inefficient.
if labels[i].Name < sf.toFilter[j].Name { b.Del(sf.toFilter...)
i++ return b.Labels(labels.EmptyLabels())
} else if labels[i].Name > sf.toFilter[j].Name {
j++
} else {
labels = labels[:i+copy(labels[i:], labels[i+1:])]
j++
}
}
return labels
} }

View file

@@ -278,7 +278,7 @@ func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels,
return 0, nil return 0, nil
} }
func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, h *histogram.Histogram) (storage.SeriesRef, error) { func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
t.histograms++ t.histograms++
if ts > t.highestTimestamp { if ts > t.highestTimestamp {
t.highestTimestamp = ts t.highestTimestamp = ts

View file

@@ -125,14 +125,19 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
} }
for _, hp := range ts.Histograms { for _, hp := range ts.Histograms {
hs := HistogramProtoToHistogram(hp) if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram.
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs) fhs := HistogramProtoToFloatHistogram(hp)
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
} else {
hs := HistogramProtoToHistogram(hp)
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
}
if err != nil { if err != nil {
unwrappedErr := errors.Unwrap(err) unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil { if unwrappedErr == nil {
unwrappedErr = err unwrappedErr = err
} }
// Althogh AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future. // a note indicating its inclusion in the future.
if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) { if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)

View file

@@ -27,26 +27,31 @@ import (
type SeriesEntry struct { type SeriesEntry struct {
Lset labels.Labels Lset labels.Labels
SampleIteratorFn func() chunkenc.Iterator SampleIteratorFn func(chunkenc.Iterator) chunkenc.Iterator
} }
func (s *SeriesEntry) Labels() labels.Labels { return s.Lset } func (s *SeriesEntry) Labels() labels.Labels { return s.Lset }
func (s *SeriesEntry) Iterator() chunkenc.Iterator { return s.SampleIteratorFn() } func (s *SeriesEntry) Iterator(it chunkenc.Iterator) chunkenc.Iterator { return s.SampleIteratorFn(it) }
type ChunkSeriesEntry struct { type ChunkSeriesEntry struct {
Lset labels.Labels Lset labels.Labels
ChunkIteratorFn func() chunks.Iterator ChunkIteratorFn func(chunks.Iterator) chunks.Iterator
} }
func (s *ChunkSeriesEntry) Labels() labels.Labels { return s.Lset } func (s *ChunkSeriesEntry) Labels() labels.Labels { return s.Lset }
func (s *ChunkSeriesEntry) Iterator() chunks.Iterator { return s.ChunkIteratorFn() } func (s *ChunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return s.ChunkIteratorFn(it) }
// NewListSeries returns series entry with iterator that allows to iterate over provided samples. // NewListSeries returns series entry with iterator that allows to iterate over provided samples.
func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry { func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry {
samplesS := Samples(samples(s))
return &SeriesEntry{ return &SeriesEntry{
Lset: lset, Lset: lset,
SampleIteratorFn: func() chunkenc.Iterator { SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator {
return NewListSeriesIterator(samples(s)) if lsi, ok := it.(*listSeriesIterator); ok {
lsi.Reset(samplesS)
return lsi
}
return NewListSeriesIterator(samplesS)
}, },
} }
} }
@@ -56,11 +61,21 @@ func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry {
func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sample) *ChunkSeriesEntry { func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sample) *ChunkSeriesEntry {
return &ChunkSeriesEntry{ return &ChunkSeriesEntry{
Lset: lset, Lset: lset,
ChunkIteratorFn: func() chunks.Iterator { ChunkIteratorFn: func(it chunks.Iterator) chunks.Iterator {
chks := make([]chunks.Meta, 0, len(samples)) lcsi, existing := it.(*listChunkSeriesIterator)
var chks []chunks.Meta
if existing {
chks = lcsi.chks[:0]
} else {
chks = make([]chunks.Meta, 0, len(samples))
}
for _, s := range samples { for _, s := range samples {
chks = append(chks, tsdbutil.ChunkFromSamples(s)) chks = append(chks, tsdbutil.ChunkFromSamples(s))
} }
if existing {
lcsi.Reset(chks...)
return lcsi
}
return NewListChunkSeriesIterator(chks...) return NewListChunkSeriesIterator(chks...)
}, },
} }
@@ -87,6 +102,11 @@ func NewListSeriesIterator(samples Samples) chunkenc.Iterator {
return &listSeriesIterator{samples: samples, idx: -1} return &listSeriesIterator{samples: samples, idx: -1}
} }
func (it *listSeriesIterator) Reset(samples Samples) {
it.samples = samples
it.idx = -1
}
func (it *listSeriesIterator) At() (int64, float64) { func (it *listSeriesIterator) At() (int64, float64) {
s := it.samples.Get(it.idx) s := it.samples.Get(it.idx)
return s.T(), s.V() return s.T(), s.V()
@@ -150,6 +170,11 @@ func NewListChunkSeriesIterator(chks ...chunks.Meta) chunks.Iterator {
return &listChunkSeriesIterator{chks: chks, idx: -1} return &listChunkSeriesIterator{chks: chks, idx: -1}
} }
func (it *listChunkSeriesIterator) Reset(chks ...chunks.Meta) {
it.chks = chks
it.idx = -1
}
func (it *listChunkSeriesIterator) At() chunks.Meta { func (it *listChunkSeriesIterator) At() chunks.Meta {
return it.chks[it.idx] return it.chks[it.idx]
} }
@@ -164,6 +189,7 @@ func (it *listChunkSeriesIterator) Err() error { return nil }
type chunkSetToSeriesSet struct { type chunkSetToSeriesSet struct {
ChunkSeriesSet ChunkSeriesSet
iter chunks.Iterator
chkIterErr error chkIterErr error
sameSeriesChunks []Series sameSeriesChunks []Series
} }
@@ -178,18 +204,18 @@ func (c *chunkSetToSeriesSet) Next() bool {
return false return false
} }
iter := c.ChunkSeriesSet.At().Iterator() c.iter = c.ChunkSeriesSet.At().Iterator(c.iter)
c.sameSeriesChunks = c.sameSeriesChunks[:0] c.sameSeriesChunks = nil
for iter.Next() { for c.iter.Next() {
c.sameSeriesChunks = append( c.sameSeriesChunks = append(
c.sameSeriesChunks, c.sameSeriesChunks,
newChunkToSeriesDecoder(c.ChunkSeriesSet.At().Labels(), iter.At()), newChunkToSeriesDecoder(c.ChunkSeriesSet.At().Labels(), c.iter.At()),
) )
} }
if iter.Err() != nil { if c.iter.Err() != nil {
c.chkIterErr = iter.Err() c.chkIterErr = c.iter.Err()
return false return false
} }
return true return true
@@ -210,9 +236,9 @@ func (c *chunkSetToSeriesSet) Err() error {
func newChunkToSeriesDecoder(labels labels.Labels, chk chunks.Meta) Series { func newChunkToSeriesDecoder(labels labels.Labels, chk chunks.Meta) Series {
return &SeriesEntry{ return &SeriesEntry{
Lset: labels, Lset: labels,
SampleIteratorFn: func() chunkenc.Iterator { SampleIteratorFn: func(it chunkenc.Iterator) chunkenc.Iterator {
// TODO(bwplotka): Can we provide any chunkenc buffer? // TODO(bwplotka): Can we provide any chunkenc buffer?
return chk.Chunk.Iterator(nil) return chk.Chunk.Iterator(it)
}, },
} }
} }
@@ -252,7 +278,7 @@ func NewSeriesToChunkEncoder(series Series) ChunkSeries {
return &seriesToChunkEncoder{series} return &seriesToChunkEncoder{series}
} }
func (s *seriesToChunkEncoder) Iterator() chunks.Iterator { func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
var ( var (
chk chunkenc.Chunk chk chunkenc.Chunk
app chunkenc.Appender app chunkenc.Appender
@@ -261,9 +287,14 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
mint := int64(math.MaxInt64) mint := int64(math.MaxInt64)
maxt := int64(math.MinInt64) maxt := int64(math.MinInt64)
chks := []chunks.Meta{} var chks []chunks.Meta
lcsi, existing := it.(*listChunkSeriesIterator)
if existing {
chks = lcsi.chks[:0]
}
i := 0 i := 0
seriesIter := s.Series.Iterator() seriesIter := s.Series.Iterator(nil)
lastType := chunkenc.ValNone lastType := chunkenc.ValNone
for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() { for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() {
if typ != lastType || i >= seriesToChunkEncoderSplit { if typ != lastType || i >= seriesToChunkEncoderSplit {
@@ -290,9 +321,10 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
lastType = typ lastType = typ
var ( var (
t int64 t int64
v float64 v float64
h *histogram.Histogram h *histogram.Histogram
fh *histogram.FloatHistogram
) )
switch typ { switch typ {
case chunkenc.ValFloat: case chunkenc.ValFloat:
@@ -301,6 +333,9 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
case chunkenc.ValHistogram: case chunkenc.ValHistogram:
t, h = seriesIter.AtHistogram() t, h = seriesIter.AtHistogram()
app.AppendHistogram(t, h) app.AppendHistogram(t, h)
case chunkenc.ValFloatHistogram:
t, fh = seriesIter.AtFloatHistogram()
app.AppendFloatHistogram(t, fh)
default: default:
return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())} return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())}
} }
@@ -323,6 +358,10 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
}) })
} }
if existing {
lcsi.Reset(chks...)
return lcsi
}
return NewListChunkSeriesIterator(chks...) return NewListChunkSeriesIterator(chks...)
} }
@@ -362,7 +401,6 @@ func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64,
case chunkenc.ValFloatHistogram: case chunkenc.ValFloatHistogram:
t, fh := iter.AtFloatHistogram() t, fh := iter.AtFloatHistogram()
result = append(result, newSampleFn(t, 0, nil, fh)) result = append(result, newSampleFn(t, 0, nil, fh))
} }
} }
} }

View file

@@ -72,17 +72,17 @@ type IndexReader interface {
// Postings returns the postings list iterator for the label pairs. // Postings returns the postings list iterator for the label pairs.
// The Postings here contain the offsets to the series inside the index. // The Postings here contain the offsets to the series inside the index.
// Found IDs are not strictly required to point to a valid Series, e.g. // Found IDs are not strictly required to point to a valid Series, e.g.
// during background garbage collections. Input values must be sorted. // during background garbage collections.
Postings(name string, values ...string) (index.Postings, error) Postings(name string, values ...string) (index.Postings, error)
// SortedPostings returns a postings list that is reordered to be sorted // SortedPostings returns a postings list that is reordered to be sorted
// by the label set of the underlying series. // by the label set of the underlying series.
SortedPostings(index.Postings) index.Postings SortedPostings(index.Postings) index.Postings
// Series populates the given labels and chunk metas for the series identified // Series populates the given builder and chunk metas for the series identified
// by the reference. // by the reference.
// Returns storage.ErrNotFound if the ref does not resolve to a known series. // Returns storage.ErrNotFound if the ref does not resolve to a known series.
Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error
// LabelNames returns all the unique label names present in the index in sorted order. // LabelNames returns all the unique label names present in the index in sorted order.
LabelNames(matchers ...*labels.Matcher) ([]string, error) LabelNames(matchers ...*labels.Matcher) ([]string, error)
@@ -499,8 +499,8 @@ func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings {
return r.ir.SortedPostings(p) return r.ir.SortedPostings(p)
} }
func (r blockIndexReader) Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error { func (r blockIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
if err := r.ir.Series(ref, lset, chks); err != nil { if err := r.ir.Series(ref, builder, chks); err != nil {
return errors.Wrapf(err, "block: %s", r.b.Meta().ULID) return errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
} }
return nil return nil
@@ -561,12 +561,12 @@ func (pb *Block) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
// Choose only valid postings which have chunks in the time-range. // Choose only valid postings which have chunks in the time-range.
stones := tombstones.NewMemTombstones() stones := tombstones.NewMemTombstones()
var lset labels.Labels
var chks []chunks.Meta var chks []chunks.Meta
var builder labels.ScratchBuilder
Outer: Outer:
for p.Next() { for p.Next() {
err := ir.Series(p.At(), &lset, &chks) err := ir.Series(p.At(), &builder, &chks)
if err != nil { if err != nil {
return err return err
} }

View file

@@ -30,6 +30,7 @@ const (
EncNone Encoding = iota EncNone Encoding = iota
EncXOR EncXOR
EncHistogram EncHistogram
EncFloatHistogram
) )
func (e Encoding) String() string { func (e Encoding) String() string {
@@ -40,6 +41,8 @@ func (e Encoding) String() string {
return "XOR" return "XOR"
case EncHistogram: case EncHistogram:
return "histogram" return "histogram"
case EncFloatHistogram:
return "floathistogram"
} }
return "<unknown>" return "<unknown>"
} }
@@ -57,7 +60,7 @@ func IsOutOfOrderChunk(e Encoding) bool {
// IsValidEncoding returns true for supported encodings. // IsValidEncoding returns true for supported encodings.
func IsValidEncoding(e Encoding) bool { func IsValidEncoding(e Encoding) bool {
return e == EncXOR || e == EncOOOXOR || e == EncHistogram return e == EncXOR || e == EncOOOXOR || e == EncHistogram || e == EncFloatHistogram
} }
// Chunk holds a sequence of sample pairs that can be iterated over and appended to. // Chunk holds a sequence of sample pairs that can be iterated over and appended to.
@@ -91,6 +94,7 @@ type Chunk interface {
type Appender interface { type Appender interface {
Append(int64, float64) Append(int64, float64)
AppendHistogram(t int64, h *histogram.Histogram) AppendHistogram(t int64, h *histogram.Histogram)
AppendFloatHistogram(t int64, h *histogram.FloatHistogram)
} }
// Iterator is a simple iterator that can only get the next value. // Iterator is a simple iterator that can only get the next value.
@@ -159,6 +163,8 @@ func (v ValueType) ChunkEncoding() Encoding {
return EncXOR return EncXOR
case ValHistogram: case ValHistogram:
return EncHistogram return EncHistogram
case ValFloatHistogram:
return EncFloatHistogram
default: default:
return EncNone return EncNone
} }
@@ -228,8 +234,9 @@ type Pool interface {
// pool is a memory pool of chunk objects. // pool is a memory pool of chunk objects.
type pool struct { type pool struct {
xor sync.Pool xor sync.Pool
histogram sync.Pool histogram sync.Pool
floatHistogram sync.Pool
} }
// NewPool returns a new pool. // NewPool returns a new pool.
@@ -245,6 +252,11 @@ func NewPool() Pool {
return &HistogramChunk{b: bstream{}} return &HistogramChunk{b: bstream{}}
}, },
}, },
floatHistogram: sync.Pool{
New: func() interface{} {
return &FloatHistogramChunk{b: bstream{}}
},
},
} }
} }
@@ -260,6 +272,11 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
c.b.stream = b c.b.stream = b
c.b.count = 0 c.b.count = 0
return c, nil return c, nil
case EncFloatHistogram:
c := p.floatHistogram.Get().(*FloatHistogramChunk)
c.b.stream = b
c.b.count = 0
return c, nil
} }
return nil, errors.Errorf("invalid chunk encoding %q", e) return nil, errors.Errorf("invalid chunk encoding %q", e)
} }
@@ -288,6 +305,17 @@ func (p *pool) Put(c Chunk) error {
sh.b.stream = nil sh.b.stream = nil
sh.b.count = 0 sh.b.count = 0
p.histogram.Put(c) p.histogram.Put(c)
case EncFloatHistogram:
sh, ok := c.(*FloatHistogramChunk)
// This may happen often with wrapped chunks. Nothing we can really do about
// it but returning an error would cause a lot of allocations again. Thus,
// we just skip it.
if !ok {
return nil
}
sh.b.stream = nil
sh.b.count = 0
p.floatHistogram.Put(c)
default: default:
return errors.Errorf("invalid chunk encoding %q", c.Encoding()) return errors.Errorf("invalid chunk encoding %q", c.Encoding())
} }
@@ -303,6 +331,8 @@ func FromData(e Encoding, d []byte) (Chunk, error) {
return &XORChunk{b: bstream{count: 0, stream: d}}, nil return &XORChunk{b: bstream{count: 0, stream: d}}, nil
case EncHistogram: case EncHistogram:
return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil
case EncFloatHistogram:
return &FloatHistogramChunk{b: bstream{count: 0, stream: d}}, nil
} }
return nil, errors.Errorf("invalid chunk encoding %q", e) return nil, errors.Errorf("invalid chunk encoding %q", e)
} }
@@ -314,6 +344,8 @@ func NewEmptyChunk(e Encoding) (Chunk, error) {
return NewXORChunk(), nil return NewXORChunk(), nil
case EncHistogram: case EncHistogram:
return NewHistogramChunk(), nil return NewHistogramChunk(), nil
case EncFloatHistogram:
return NewFloatHistogramChunk(), nil
} }
return nil, errors.Errorf("invalid chunk encoding %q", e) return nil, errors.Errorf("invalid chunk encoding %q", e)
} }
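With EncFloatHistogram wired into the pool, FromData, and NewEmptyChunk, an end-to-end use of the new chunk type looks roughly like this sketch (writeAndRead and the histogram literal are illustrative only):

package example

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// writeAndRead creates an empty float histogram chunk, appends one sample,
// and iterates it back out.
func writeAndRead() error {
	chk, err := chunkenc.NewEmptyChunk(chunkenc.EncFloatHistogram)
	if err != nil {
		return err
	}
	app, err := chk.Appender()
	if err != nil {
		return err
	}
	fh := &histogram.FloatHistogram{
		Schema:          0,
		Count:           3, // Zero bucket plus the two positive buckets below.
		Sum:             42,
		ZeroThreshold:   0.001,
		ZeroCount:       1,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 1},
	}
	app.AppendFloatHistogram(1000, fh)

	it := chk.Iterator(nil)
	for it.Next() == chunkenc.ValFloatHistogram {
		t, h := it.AtFloatHistogram()
		fmt.Println(t, h.Count)
	}
	return it.Err()
}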

View file

@@ -0,0 +1,831 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"encoding/binary"
"math"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/value"
)
// FloatHistogramChunk holds encoded sample data for a sparse, high-resolution
// float histogram.
//
// Each sample has multiple "fields", stored in the following way (raw = store
// number directly, delta = store delta to the previous number, dod = store
// delta of the delta to the previous number, xor = what we do for regular
// sample values):
//
// field →      ts      count   zeroCount   sum     []posbuckets    []negbuckets
// sample 1     raw     raw     raw         raw     []raw           []raw
// sample 2     delta   xor     xor         xor     []xor           []xor
// sample >2    dod     xor     xor         xor     []xor           []xor
type FloatHistogramChunk struct {
b bstream
}
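A worked example of the timestamp column: for samples at t = 1000, 1015, 1030, sample 1 stores 1000 raw, sample 2 stores the delta 15, and sample 3 stores the delta-of-delta (1030-1015) - (1015-1000) = 0, so a perfectly regular scrape interval costs only a couple of bits per timestamp from the third sample on.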
// NewFloatHistogramChunk returns a new chunk with float histogram encoding.
func NewFloatHistogramChunk() *FloatHistogramChunk {
b := make([]byte, 3, 128)
return &FloatHistogramChunk{b: bstream{stream: b, count: 0}}
}
// xorValue holds all the necessary information to encode
// and decode XOR encoded float64 values.
type xorValue struct {
value float64
leading uint8
trailing uint8
}
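xorValue caches exactly the per-field state the Gorilla-style XOR scheme needs. A self-contained sketch of that step (xorStep is hypothetical):

package example

import (
	"math"
	"math/bits"
)

// xorStep XORs the new value's bits against the previous value's and reports
// the leading/trailing zero counts of the difference; only the "meaningful"
// middle bits then have to be written to the stream, and an unchanged value
// costs a single control bit.
func xorStep(prev, cur float64) (leading, trailing uint8, delta uint64) {
	delta = math.Float64bits(prev) ^ math.Float64bits(cur)
	if delta == 0 {
		return 0, 0, 0
	}
	return uint8(bits.LeadingZeros64(delta)), uint8(bits.TrailingZeros64(delta)), delta
}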
// Encoding returns the encoding type.
func (c *FloatHistogramChunk) Encoding() Encoding {
return EncFloatHistogram
}
// Bytes returns the underlying byte slice of the chunk.
func (c *FloatHistogramChunk) Bytes() []byte {
return c.b.bytes()
}
// NumSamples returns the number of samples in the chunk.
func (c *FloatHistogramChunk) NumSamples() int {
return int(binary.BigEndian.Uint16(c.Bytes()))
}
// Layout returns the histogram layout. Only call this on chunks that have at
// least one sample.
func (c *FloatHistogramChunk) Layout() (
schema int32, zeroThreshold float64,
negativeSpans, positiveSpans []histogram.Span,
err error,
) {
if c.NumSamples() == 0 {
panic("FloatHistogramChunk.Layout() called on an empty chunk")
}
b := newBReader(c.Bytes()[2:])
return readHistogramChunkLayout(&b)
}
// SetCounterResetHeader sets the counter reset header.
func (c *FloatHistogramChunk) SetCounterResetHeader(h CounterResetHeader) {
setCounterResetHeader(h, c.Bytes())
}
// GetCounterResetHeader returns the info about the first 2 bits of the chunk
// header.
func (c *FloatHistogramChunk) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(c.Bytes()[2] & 0b11000000)
}
// Compact implements the Chunk interface.
func (c *FloatHistogramChunk) Compact() {
if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
buf := make([]byte, l)
copy(buf, c.b.stream)
c.b.stream = buf
}
}
// Appender implements the Chunk interface.
func (c *FloatHistogramChunk) Appender() (Appender, error) {
it := c.iterator(nil)
// To get an appender, we must know the state it would have if we had
// appended all existing data from scratch. We iterate through the end
// and populate via the iterator's state.
for it.Next() == ValFloatHistogram {
}
if err := it.Err(); err != nil {
return nil, err
}
pBuckets := make([]xorValue, len(it.pBuckets))
for i := 0; i < len(it.pBuckets); i++ {
pBuckets[i] = xorValue{
value: it.pBuckets[i],
leading: it.pBucketsLeading[i],
trailing: it.pBucketsTrailing[i],
}
}
nBuckets := make([]xorValue, len(it.nBuckets))
for i := 0; i < len(it.nBuckets); i++ {
nBuckets[i] = xorValue{
value: it.nBuckets[i],
leading: it.nBucketsLeading[i],
trailing: it.nBucketsTrailing[i],
}
}
a := &FloatHistogramAppender{
b: &c.b,
schema: it.schema,
zThreshold: it.zThreshold,
pSpans: it.pSpans,
nSpans: it.nSpans,
t: it.t,
tDelta: it.tDelta,
cnt: it.cnt,
zCnt: it.zCnt,
pBuckets: pBuckets,
nBuckets: nBuckets,
sum: it.sum,
}
if it.numTotal == 0 {
a.sum.leading = 0xff
a.cnt.leading = 0xff
a.zCnt.leading = 0xff
}
return a, nil
}
func (c *FloatHistogramChunk) iterator(it Iterator) *floatHistogramIterator {
// This comment is copied from XORChunk.iterator:
// Should iterators guarantee to act on a copy of the data so it doesn't lock append?
// When using striped locks to guard access to chunks, probably yes.
// Could only copy data if the chunk is not completed yet.
if histogramIter, ok := it.(*floatHistogramIterator); ok {
histogramIter.Reset(c.b.bytes())
return histogramIter
}
return newFloatHistogramIterator(c.b.bytes())
}
func newFloatHistogramIterator(b []byte) *floatHistogramIterator {
it := &floatHistogramIterator{
br: newBReader(b),
numTotal: binary.BigEndian.Uint16(b),
t: math.MinInt64,
}
// The first 3 bytes contain chunk headers.
// We skip that for actual samples.
_, _ = it.br.readBits(24)
it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
return it
}
// Iterator implements the Chunk interface.
func (c *FloatHistogramChunk) Iterator(it Iterator) Iterator {
return c.iterator(it)
}
// FloatHistogramAppender is an Appender implementation for float histograms.
type FloatHistogramAppender struct {
b *bstream
// Layout:
schema int32
zThreshold float64
pSpans, nSpans []histogram.Span
t, tDelta int64
sum, cnt, zCnt xorValue
pBuckets, nBuckets []xorValue
}
func (a *FloatHistogramAppender) GetCounterResetHeader() CounterResetHeader {
return CounterResetHeader(a.b.bytes()[2] & 0b11000000)
}
func (a *FloatHistogramAppender) NumSamples() int {
return int(binary.BigEndian.Uint16(a.b.bytes()))
}
// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
func (a *FloatHistogramAppender) Append(int64, float64) {
panic("appended a float sample to a histogram chunk")
}
// AppendHistogram implements Appender. This implementation panics because integer
// histogram samples must never be appended to a float histogram chunk.
func (a *FloatHistogramAppender) AppendHistogram(int64, *histogram.Histogram) {
panic("appended an integer histogram to a float histogram chunk")
}
// Appendable returns whether the chunk can be appended to, and if so whether
// any recoding needs to happen using the provided inserts (in case of any new
// buckets, positive or negative range, respectively). If the sample is a gauge
// histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
// - The threshold for the zero bucket has changed.
// - Any buckets have disappeared.
// - There was a counter reset in the count of observations or in any bucket, including the zero bucket.
// - The last sample in the chunk was stale while the current sample is not stale.
//
// The method returns an additional boolean set to true if it is not appendable
// because of a counter reset. If the given sample is stale, it is always ok to
// append. If counterReset is true, okToAppend is always false.
func (a *FloatHistogramAppender) Appendable(h *histogram.FloatHistogram) (
positiveInserts, negativeInserts []Insert,
okToAppend, counterReset bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
return
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
return
}
if value.IsStaleNaN(a.sum.value) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
return
}
if h.Count < a.cnt.value {
// There has been a counter reset.
counterReset = true
return
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
return
}
if h.ZeroCount < a.zCnt.value {
// There has been a counter reset since ZeroThreshold didn't change.
counterReset = true
return
}
var ok bool
positiveInserts, ok = expandSpansForward(a.pSpans, h.PositiveSpans)
if !ok {
counterReset = true
return
}
negativeInserts, ok = expandSpansForward(a.nSpans, h.NegativeSpans)
if !ok {
counterReset = true
return
}
if counterResetInAnyFloatBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
counterResetInAnyFloatBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
counterReset, positiveInserts, negativeInserts = true, nil, nil
return
}
okToAppend = true
return
}
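A hedged sketch of how a writer might act on Appendable's four results, mirroring (but not copied from) what the TSDB head appender does; appendOrCut is hypothetical, and gauge chunks would go through AppendableGauge instead:

package example

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// appendOrCut appends fh to the current chunk when possible, recodes the
// chunk first when only new buckets appeared, and cuts a fresh chunk on a
// counter reset or any other incompatibility.
func appendOrCut(app *chunkenc.FloatHistogramAppender, chk chunkenc.Chunk, t int64, fh *histogram.FloatHistogram) (chunkenc.Chunk, chunkenc.Appender) {
	posIns, negIns, ok, counterReset := app.Appendable(fh)
	switch {
	case ok && len(posIns) == 0 && len(negIns) == 0:
		app.AppendFloatHistogram(t, fh) // Layout unchanged: plain append.
		return chk, app
	case ok:
		// New buckets only: recode the existing samples, then append.
		newChk, newApp := app.Recode(posIns, negIns, fh.PositiveSpans, fh.NegativeSpans)
		newApp.AppendFloatHistogram(t, fh)
		return newChk, newApp
	default:
		_ = counterReset // Either a reset or an incompatible layout: cut a new chunk.
		newChk := chunkenc.NewFloatHistogramChunk()
		newApp, _ := newChk.Appender()
		newApp.AppendFloatHistogram(t, fh)
		return newChk, newApp
	}
}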
// AppendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided inserts
// (in case of any new buckets, positive or negative range, respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
//
// This method must be only used for gauge histograms.
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *FloatHistogramAppender) AppendableGauge(h *histogram.FloatHistogram) (
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
okToAppend bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() != GaugeType {
return
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
return
}
if value.IsStaleNaN(a.sum.value) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
return
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
return
}
positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
okToAppend = true
return
}
// counterResetInAnyFloatBucket returns true if there was a counter reset for any
// bucket. This should be called only when the bucket layout is the same or new
// buckets were added. It does not handle the case of buckets missing.
func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, oldSpans, newSpans []histogram.Span) bool {
if len(oldSpans) == 0 || len(oldBuckets) == 0 {
return false
}
oldSpanSliceIdx, newSpanSliceIdx := 0, 0 // Index for the span slices.
oldInsideSpanIdx, newInsideSpanIdx := uint32(0), uint32(0) // Index inside a span.
oldIdx, newIdx := oldSpans[0].Offset, newSpans[0].Offset
oldBucketSliceIdx, newBucketSliceIdx := 0, 0 // Index inside bucket slice.
oldVal, newVal := oldBuckets[0].value, newBuckets[0]
// Since we assume that new spans won't have missing buckets, there will never be a case
// where the old index will not find a matching new index.
for {
if oldIdx == newIdx {
if newVal < oldVal {
return true
}
}
if oldIdx <= newIdx {
// Moving ahead old bucket and span by 1 index.
if oldInsideSpanIdx == oldSpans[oldSpanSliceIdx].Length-1 {
// Current span is over.
oldSpanSliceIdx++
oldInsideSpanIdx = 0
if oldSpanSliceIdx >= len(oldSpans) {
// All old spans are over.
break
}
oldIdx += 1 + oldSpans[oldSpanSliceIdx].Offset
} else {
oldInsideSpanIdx++
oldIdx++
}
oldBucketSliceIdx++
oldVal = oldBuckets[oldBucketSliceIdx].value
}
if oldIdx > newIdx {
// Moving ahead new bucket and span by 1 index.
if newInsideSpanIdx == newSpans[newSpanSliceIdx].Length-1 {
// Current span is over.
newSpanSliceIdx++
newInsideSpanIdx = 0
if newSpanSliceIdx >= len(newSpans) {
// All new spans are over.
// This should not happen, old spans above should catch this first.
panic("new spans over before old spans in counterReset")
}
newIdx += 1 + newSpans[newSpanSliceIdx].Offset
} else {
newInsideSpanIdx++
newIdx++
}
newBucketSliceIdx++
newVal = newBuckets[newBucketSliceIdx]
}
}
return false
}
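For readers of the two-pointer walk above: spans encode the absolute indexes of the dense bucket slice, with each later span's Offset counted from the slot after the previous span. A hypothetical helper that expands them; spans [{0,2},{3,2}] yield indexes 0, 1, 5, 6:

package example

import "github.com/prometheus/prometheus/model/histogram"

// bucketIndexes expands span-encoded buckets into their absolute indexes,
// matching the oldIdx/newIdx bookkeeping in counterResetInAnyFloatBucket.
func bucketIndexes(spans []histogram.Span) []int32 {
	var idxs []int32
	idx := int32(0)
	for i, s := range spans {
		if i == 0 {
			idx = s.Offset
		} else {
			idx += 1 + s.Offset
		}
		for j := uint32(0); j < s.Length; j++ {
			if j > 0 {
				idx++
			}
			idxs = append(idxs, idx)
		}
	}
	return idxs
}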
// AppendFloatHistogram appends a float histogram to the chunk. The caller must ensure that
// the histogram is properly structured, e.g. the number of buckets used
// corresponds to the number conveyed by the span structures. First call
// Appendable() and act accordingly!
func (a *FloatHistogramAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) {
var tDelta int64
num := binary.BigEndian.Uint16(a.b.bytes())
if value.IsStaleNaN(h.Sum) {
// Emptying out other fields to write no buckets, and an empty
// layout in case of first histogram in the chunk.
h = &histogram.FloatHistogram{Sum: h.Sum}
}
if num == 0 {
// The first append gets the privilege to dictate the layout
// but it's also responsible for encoding it into the chunk!
writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans)
a.schema = h.Schema
a.zThreshold = h.ZeroThreshold
if len(h.PositiveSpans) > 0 {
a.pSpans = make([]histogram.Span, len(h.PositiveSpans))
copy(a.pSpans, h.PositiveSpans)
} else {
a.pSpans = nil
}
if len(h.NegativeSpans) > 0 {
a.nSpans = make([]histogram.Span, len(h.NegativeSpans))
copy(a.nSpans, h.NegativeSpans)
} else {
a.nSpans = nil
}
numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
if numPBuckets > 0 {
a.pBuckets = make([]xorValue, numPBuckets)
for i := 0; i < numPBuckets; i++ {
a.pBuckets[i] = xorValue{
value: h.PositiveBuckets[i],
leading: 0xff,
}
}
} else {
a.pBuckets = nil
}
if numNBuckets > 0 {
a.nBuckets = make([]xorValue, numNBuckets)
for i := 0; i < numNBuckets; i++ {
a.nBuckets[i] = xorValue{
value: h.NegativeBuckets[i],
leading: 0xff,
}
}
} else {
a.nBuckets = nil
}
// Now store the actual data.
putVarbitInt(a.b, t)
a.b.writeBits(math.Float64bits(h.Count), 64)
a.b.writeBits(math.Float64bits(h.ZeroCount), 64)
a.b.writeBits(math.Float64bits(h.Sum), 64)
a.cnt.value = h.Count
a.zCnt.value = h.ZeroCount
a.sum.value = h.Sum
for _, b := range h.PositiveBuckets {
a.b.writeBits(math.Float64bits(b), 64)
}
for _, b := range h.NegativeBuckets {
a.b.writeBits(math.Float64bits(b), 64)
}
} else {
// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
// so we don't need a separate single delta logic for the 2nd sample.
tDelta = t - a.t
tDod := tDelta - a.tDelta
putVarbitInt(a.b, tDod)
a.writeXorValue(&a.cnt, h.Count)
a.writeXorValue(&a.zCnt, h.ZeroCount)
a.writeXorValue(&a.sum, h.Sum)
for i, b := range h.PositiveBuckets {
a.writeXorValue(&a.pBuckets[i], b)
}
for i, b := range h.NegativeBuckets {
a.writeXorValue(&a.nBuckets[i], b)
}
}
binary.BigEndian.PutUint16(a.b.bytes(), num+1)
a.t = t
a.tDelta = tDelta
}
func (a *FloatHistogramAppender) writeXorValue(old *xorValue, v float64) {
xorWrite(a.b, v, old.value, &old.leading, &old.trailing)
old.value = v
}
// Recode converts the current chunk to accommodate an expansion of the set of
// (positive and/or negative) buckets used, according to the provided inserts,
// resulting in the honoring of the provided new positive and negative spans. To
// continue appending, use the returned Appender rather than the receiver of
// this method.
func (a *FloatHistogramAppender) Recode(
positiveInserts, negativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
) (Chunk, Appender) {
// TODO(beorn7): This currently just decodes everything and then encodes
// it again with the new span layout. This can probably be done in-place
// by editing the chunk. But let's first see how expensive it is in the
// big picture. Also, in-place editing might create concurrency issues.
byts := a.b.bytes()
it := newFloatHistogramIterator(byts)
hc := NewFloatHistogramChunk()
app, err := hc.Appender()
if err != nil {
panic(err)
}
numPositiveBuckets, numNegativeBuckets := countSpans(positiveSpans), countSpans(negativeSpans)
for it.Next() == ValFloatHistogram {
tOld, hOld := it.AtFloatHistogram()
// We have to newly allocate slices for the modified buckets
// here because they are kept by the appender until the next
// append.
// TODO(beorn7): We might be able to optimize this.
var positiveBuckets, negativeBuckets []float64
if numPositiveBuckets > 0 {
positiveBuckets = make([]float64, numPositiveBuckets)
}
if numNegativeBuckets > 0 {
negativeBuckets = make([]float64, numNegativeBuckets)
}
// Save the modified histogram to the new chunk.
hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans
if len(positiveInserts) > 0 {
hOld.PositiveBuckets = insert(hOld.PositiveBuckets, positiveBuckets, positiveInserts, false)
}
if len(negativeInserts) > 0 {
hOld.NegativeBuckets = insert(hOld.NegativeBuckets, negativeBuckets, negativeInserts, false)
}
app.AppendFloatHistogram(tOld, hOld)
}
hc.SetCounterResetHeader(CounterResetHeader(byts[2] & 0b11000000))
return hc, app
}
// RecodeHistogramm converts the current histogram (in-place) to accommodate an expansion of the set of
// (positive and/or negative) buckets used.
func (a *FloatHistogramAppender) RecodeHistogramm(
fh *histogram.FloatHistogram,
pBackwardInter, nBackwardInter []Insert,
) {
if len(pBackwardInter) > 0 {
numPositiveBuckets := countSpans(fh.PositiveSpans)
fh.PositiveBuckets = insert(fh.PositiveBuckets, make([]float64, numPositiveBuckets), pBackwardInter, false)
}
if len(nBackwardInter) > 0 {
numNegativeBuckets := countSpans(fh.NegativeSpans)
fh.NegativeBuckets = insert(fh.NegativeBuckets, make([]float64, numNegativeBuckets), nBackwardInter, false)
}
}
type floatHistogramIterator struct {
br bstreamReader
numTotal uint16
numRead uint16
counterResetHeader CounterResetHeader
// Layout:
schema int32
zThreshold float64
pSpans, nSpans []histogram.Span
// For the fields that are tracked as deltas and ultimately dod's.
t int64
tDelta int64
// All Gorilla xor encoded.
sum, cnt, zCnt xorValue
// Buckets are not of type xorValue to avoid creating
// new slices for every AtFloatHistogram call.
pBuckets, nBuckets []float64
pBucketsLeading, nBucketsLeading []uint8
pBucketsTrailing, nBucketsTrailing []uint8
err error
// Track calls to retrieve methods. Once they have been called, we
// cannot recycle the bucket slices anymore because we have returned
// them in the histogram.
atFloatHistogramCalled bool
}
func (it *floatHistogramIterator) Seek(t int64) ValueType {
if it.err != nil {
return ValNone
}
for t > it.t || it.numRead == 0 {
if it.Next() == ValNone {
return ValNone
}
}
return ValFloatHistogram
}
func (it *floatHistogramIterator) At() (int64, float64) {
panic("cannot call floatHistogramIterator.At")
}
func (it *floatHistogramIterator) AtHistogram() (int64, *histogram.Histogram) {
panic("cannot call floatHistogramIterator.AtHistogram")
}
func (it *floatHistogramIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
if value.IsStaleNaN(it.sum.value) {
return it.t, &histogram.FloatHistogram{Sum: it.sum.value}
}
it.atFloatHistogramCalled = true
return it.t, &histogram.FloatHistogram{
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: it.cnt.value,
ZeroCount: it.zCnt.value,
Sum: it.sum.value,
ZeroThreshold: it.zThreshold,
Schema: it.schema,
PositiveSpans: it.pSpans,
NegativeSpans: it.nSpans,
PositiveBuckets: it.pBuckets,
NegativeBuckets: it.nBuckets,
}
}
func (it *floatHistogramIterator) AtT() int64 {
return it.t
}
func (it *floatHistogramIterator) Err() error {
return it.err
}
func (it *floatHistogramIterator) Reset(b []byte) {
// The first 3 bytes contain chunk headers.
// We skip that for actual samples.
it.br = newBReader(b[3:])
it.numTotal = binary.BigEndian.Uint16(b)
it.numRead = 0
it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
it.t, it.tDelta = 0, 0
it.cnt, it.zCnt, it.sum = xorValue{}, xorValue{}, xorValue{}
if it.atFloatHistogramCalled {
it.atFloatHistogramCalled = false
it.pBuckets, it.nBuckets = nil, nil
} else {
it.pBuckets, it.nBuckets = it.pBuckets[:0], it.nBuckets[:0]
}
it.pBucketsLeading, it.pBucketsTrailing = it.pBucketsLeading[:0], it.pBucketsTrailing[:0]
it.nBucketsLeading, it.nBucketsTrailing = it.nBucketsLeading[:0], it.nBucketsTrailing[:0]
it.err = nil
}
func (it *floatHistogramIterator) Next() ValueType {
if it.err != nil || it.numRead == it.numTotal {
return ValNone
}
if it.numRead == 0 {
// The first read is responsible for reading the chunk layout
// and for initializing fields that depend on it. We give
// counter reset info at chunk level, hence we discard it here.
schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br)
if err != nil {
it.err = err
return ValNone
}
it.schema = schema
it.zThreshold = zeroThreshold
it.pSpans, it.nSpans = posSpans, negSpans
numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
// Allocate bucket slices as needed, recycling existing slices
// in case this iterator was reset and already has slices of a
// sufficient capacity.
if numPBuckets > 0 {
it.pBuckets = append(it.pBuckets, make([]float64, numPBuckets)...)
it.pBucketsLeading = append(it.pBucketsLeading, make([]uint8, numPBuckets)...)
it.pBucketsTrailing = append(it.pBucketsTrailing, make([]uint8, numPBuckets)...)
}
if numNBuckets > 0 {
it.nBuckets = append(it.nBuckets, make([]float64, numNBuckets)...)
it.nBucketsLeading = append(it.nBucketsLeading, make([]uint8, numNBuckets)...)
it.nBucketsTrailing = append(it.nBucketsTrailing, make([]uint8, numNBuckets)...)
}
// Now read the actual data.
t, err := readVarbitInt(&it.br)
if err != nil {
it.err = err
return ValNone
}
it.t = t
cnt, err := it.br.readBits(64)
if err != nil {
it.err = err
return ValNone
}
it.cnt.value = math.Float64frombits(cnt)
zcnt, err := it.br.readBits(64)
if err != nil {
it.err = err
return ValNone
}
it.zCnt.value = math.Float64frombits(zcnt)
sum, err := it.br.readBits(64)
if err != nil {
it.err = err
return ValNone
}
it.sum.value = math.Float64frombits(sum)
for i := range it.pBuckets {
v, err := it.br.readBits(64)
if err != nil {
it.err = err
return ValNone
}
it.pBuckets[i] = math.Float64frombits(v)
}
for i := range it.nBuckets {
v, err := it.br.readBits(64)
if err != nil {
it.err = err
return ValNone
}
it.nBuckets[i] = math.Float64frombits(v)
}
it.numRead++
return ValFloatHistogram
}
// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
// so we don't need a separate single delta logic for the 2nd sample.
// Recycle bucket slices that have not been returned yet. Otherwise, copy them.
// We can always recycle the slices for leading and trailing bits as they are
// never returned to the caller.
if it.atFloatHistogramCalled {
it.atFloatHistogramCalled = false
if len(it.pBuckets) > 0 {
newBuckets := make([]float64, len(it.pBuckets))
copy(newBuckets, it.pBuckets)
it.pBuckets = newBuckets
} else {
it.pBuckets = nil
}
if len(it.nBuckets) > 0 {
newBuckets := make([]float64, len(it.nBuckets))
copy(newBuckets, it.nBuckets)
it.nBuckets = newBuckets
} else {
it.nBuckets = nil
}
}
tDod, err := readVarbitInt(&it.br)
if err != nil {
it.err = err
return ValNone
}
it.tDelta = it.tDelta + tDod
it.t += it.tDelta
if ok := it.readXor(&it.cnt.value, &it.cnt.leading, &it.cnt.trailing); !ok {
return ValNone
}
if ok := it.readXor(&it.zCnt.value, &it.zCnt.leading, &it.zCnt.trailing); !ok {
return ValNone
}
if ok := it.readXor(&it.sum.value, &it.sum.leading, &it.sum.trailing); !ok {
return ValNone
}
if value.IsStaleNaN(it.sum.value) {
it.numRead++
return ValFloatHistogram
}
for i := range it.pBuckets {
if ok := it.readXor(&it.pBuckets[i], &it.pBucketsLeading[i], &it.pBucketsTrailing[i]); !ok {
return ValNone
}
}
for i := range it.nBuckets {
if ok := it.readXor(&it.nBuckets[i], &it.nBucketsLeading[i], &it.nBucketsTrailing[i]); !ok {
return ValNone
}
}
it.numRead++
return ValFloatHistogram
}
func (it *floatHistogramIterator) readXor(v *float64, leading, trailing *uint8) bool {
err := xorRead(&it.br, v, leading, trailing)
if err != nil {
it.err = err
return false
}
return true
}

View file

@@ -67,7 +67,7 @@ func (c *HistogramChunk) Layout() (
	err error,
) {
	if c.NumSamples() == 0 {
-		panic("HistoChunk.Layout() called on an empty chunk")
+		panic("HistogramChunk.Layout() called on an empty chunk")
	}
	b := newBReader(c.Bytes()[2:])
	return readHistogramChunkLayout(&b)
@@ -88,17 +88,22 @@ const (
	UnknownCounterReset CounterResetHeader = 0b00000000
)

-// SetCounterResetHeader sets the counter reset header.
-func (c *HistogramChunk) SetCounterResetHeader(h CounterResetHeader) {
+// setCounterResetHeader sets the counter reset header of the chunk.
+// The third byte of the chunk is the counter reset header.
+func setCounterResetHeader(h CounterResetHeader, bytes []byte) {
	switch h {
	case CounterReset, NotCounterReset, GaugeType, UnknownCounterReset:
-		bytes := c.Bytes()
		bytes[2] = (bytes[2] & 0b00111111) | byte(h)
	default:
		panic("invalid CounterResetHeader type")
	}
}

+// SetCounterResetHeader sets the counter reset header.
+func (c *HistogramChunk) SetCounterResetHeader(h CounterResetHeader) {
+	setCounterResetHeader(h, c.Bytes())
+}
// GetCounterResetHeader returns the info about the first 2 bits of the chunk
// header.
func (c *HistogramChunk) GetCounterResetHeader() CounterResetHeader {

@@ -172,6 +177,7 @@ func newHistogramIterator(b []byte) *histogramIterator {
	// The first 3 bytes contain chunk headers.
	// We skip that for actual samples.
	_, _ = it.br.readBits(24)
+	it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
	return it
}
@@ -217,36 +223,50 @@ type HistogramAppender struct
	trailing uint8
}

+func (a *HistogramAppender) GetCounterResetHeader() CounterResetHeader {
+	return CounterResetHeader(a.b.bytes()[2] & 0b11000000)
+}
+
+func (a *HistogramAppender) NumSamples() int {
+	return int(binary.BigEndian.Uint16(a.b.bytes()))
+}
+
// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
func (a *HistogramAppender) Append(int64, float64) {
	panic("appended a float sample to a histogram chunk")
}

+// AppendFloatHistogram implements Appender. This implementation panics because float
+// histogram samples must never be appended to a histogram chunk.
+func (a *HistogramAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) {
+	panic("appended a float histogram to a histogram chunk")
+}
+
-// Appendable returns whether the chunk can be appended to, and if so
-// whether any recoding needs to happen using the provided interjections
-// (in case of any new buckets, positive or negative range, respectively).
+// Appendable returns whether the chunk can be appended to, and if so whether
+// any recoding needs to happen using the provided inserts (in case of any new
+// buckets, positive or negative range, respectively). If the sample is a gauge
+// histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
//
-// • The schema has changed.
-//
-// • The threshold for the zero bucket has changed.
-//
-// • Any buckets have disappeared.
-//
-// • There was a counter reset in the count of observations or in any bucket,
-// including the zero bucket.
-//
-// • The last sample in the chunk was stale while the current sample is not stale.
+// - The schema has changed.
+// - The threshold for the zero bucket has changed.
+// - Any buckets have disappeared.
+// - There was a counter reset in the count of observations or in any bucket,
+//   including the zero bucket.
+// - The last sample in the chunk was stale while the current sample is not stale.
//
// The method returns an additional boolean set to true if it is not appendable
// because of a counter reset. If the given sample is stale, it is always ok to
// append. If counterReset is true, okToAppend is always false.
func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
-	positiveInterjections, negativeInterjections []Interjection,
+	positiveInserts, negativeInserts []Insert,
	okToAppend, counterReset bool,
) {
+	if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
+		return
+	}
	if value.IsStaleNaN(h.Sum) {
		// This is a stale sample whose buckets and spans don't matter.
		okToAppend = true
@@ -275,12 +295,12 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
	}

	var ok bool
-	positiveInterjections, ok = compareSpans(a.pSpans, h.PositiveSpans)
+	positiveInserts, ok = expandSpansForward(a.pSpans, h.PositiveSpans)
	if !ok {
		counterReset = true
		return
	}
-	negativeInterjections, ok = compareSpans(a.nSpans, h.NegativeSpans)
+	negativeInserts, ok = expandSpansForward(a.nSpans, h.NegativeSpans)
	if !ok {
		counterReset = true
		return

@@ -288,7 +308,7 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
	if counterResetInAnyBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
		counterResetInAnyBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
-		counterReset, positiveInterjections, negativeInterjections = true, nil, nil
+		counterReset, positiveInserts, negativeInserts = true, nil, nil
		return
	}

@@ -296,6 +316,50 @@ func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
	return
}
// AppendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided inserts
// (in case of any new buckets, positive or negative range, respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
//
// This method must be only used for gauge histograms.
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *HistogramAppender) AppendableGauge(h *histogram.Histogram) (
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
positiveSpans, negativeSpans []histogram.Span,
okToAppend bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() != GaugeType {
return
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
return
}
if value.IsStaleNaN(a.sum) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
return
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
return
}
positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
okToAppend = true
return
}
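A hedged sketch of how these return values are meant to be combined (the real call sites live in tsdb's head appender, outside this excerpt): backward inserts widen the incoming histogram to the chunk's layout, forward inserts widen the chunk to the histogram's layout, and the merged spans describe the union.

func appendGauge(a *HistogramAppender, c Chunk, t int64, h *histogram.Histogram) (Chunk, Appender, bool) {
	fwdP, fwdN, bwdP, bwdN, pSpans, nSpans, ok := a.AppendableGauge(h)
	if !ok {
		return nil, nil, false // Cut a new chunk instead.
	}
	if len(bwdP) > 0 || len(bwdN) > 0 {
		// The chunk has buckets the incoming histogram lacks.
		h.PositiveSpans, h.NegativeSpans = pSpans, nSpans
		a.RecodeHistogram(h, bwdP, bwdN)
	}
	app := Appender(a)
	if len(fwdP) > 0 || len(fwdN) > 0 {
		// The incoming histogram has buckets the chunk lacks.
		c, app = a.Recode(fwdP, fwdN, pSpans, nSpans)
	}
	app.AppendHistogram(t, h)
	return c, app, true
}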
// counterResetInAnyBucket returns true if there was a counter reset for any
// bucket. This should be called only when the bucket layout is the same or new
// buckets were added. It does not handle the case of buckets missing.

@@ -425,8 +489,9 @@ func (a *HistogramAppender) AppendHistogram(t int64, h *histogram.Histogram) {
			putVarbitInt(a.b, b)
		}
	} else {
-		// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
-		// so we don't need a separate single delta logic for the 2nd sample.
+		// The case for the 2nd sample with single deltas is implicitly
+		// handled correctly with the double delta code, so we don't
+		// need a separate single delta logic for the 2nd sample.
		tDelta = t - a.t
		cntDelta = int64(h.Count) - int64(a.cnt)
@@ -476,12 +541,12 @@ func (a *HistogramAppender) AppendHistogram(t int64, h *histogram.Histogram) {
}

// Recode converts the current chunk to accommodate an expansion of the set of
-// (positive and/or negative) buckets used, according to the provided
-// interjections, resulting in the honoring of the provided new positive and
-// negative spans. To continue appending, use the returned Appender rather than
-// the receiver of this method.
+// (positive and/or negative) buckets used, according to the provided inserts,
+// resulting in the honoring of the provided new positive and negative spans. To
+// continue appending, use the returned Appender rather than the receiver of
+// this method.
func (a *HistogramAppender) Recode(
-	positiveInterjections, negativeInterjections []Interjection,
+	positiveInserts, negativeInserts []Insert,
	positiveSpans, negativeSpans []histogram.Span,
) (Chunk, Appender) {
	// TODO(beorn7): This currently just decodes everything and then encodes

@@ -514,11 +579,11 @@ func (a *HistogramAppender) Recode(
		// Save the modified histogram to the new chunk.
		hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans
-		if len(positiveInterjections) > 0 {
-			hOld.PositiveBuckets = interject(hOld.PositiveBuckets, positiveBuckets, positiveInterjections)
+		if len(positiveInserts) > 0 {
+			hOld.PositiveBuckets = insert(hOld.PositiveBuckets, positiveBuckets, positiveInserts, true)
		}
-		if len(negativeInterjections) > 0 {
-			hOld.NegativeBuckets = interject(hOld.NegativeBuckets, negativeBuckets, negativeInterjections)
+		if len(negativeInserts) > 0 {
+			hOld.NegativeBuckets = insert(hOld.NegativeBuckets, negativeBuckets, negativeInserts, true)
		}
		app.AppendHistogram(tOld, hOld)
	}

@@ -527,6 +592,22 @@ func (a *HistogramAppender) Recode(
	return hc, app
}
// RecodeHistogram converts the current histogram (in-place) to accommodate an
// expansion of the set of (positive and/or negative) buckets used.
func (a *HistogramAppender) RecodeHistogram(
h *histogram.Histogram,
pBackwardInserts, nBackwardInserts []Insert,
) {
if len(pBackwardInserts) > 0 {
numPositiveBuckets := countSpans(h.PositiveSpans)
h.PositiveBuckets = insert(h.PositiveBuckets, make([]int64, numPositiveBuckets), pBackwardInserts, true)
}
if len(nBackwardInserts) > 0 {
numNegativeBuckets := countSpans(h.NegativeSpans)
h.NegativeBuckets = insert(h.NegativeBuckets, make([]int64, numNegativeBuckets), nBackwardInserts, true)
}
}
func (a *HistogramAppender) writeSumDelta(v float64) {
	xorWrite(a.b, v, a.sum, &a.leading, &a.trailing)
}

@@ -536,6 +617,8 @@ type histogramIterator struct {
	numTotal uint16
	numRead  uint16

+	counterResetHeader CounterResetHeader
+
	// Layout:
	schema     int32
	zThreshold float64

@@ -585,15 +668,16 @@ func (it *histogramIterator) AtHistogram() (int64, *histogram.Histogram) {
	}
	it.atHistogramCalled = true
	return it.t, &histogram.Histogram{
-		Count:           it.cnt,
-		ZeroCount:       it.zCnt,
-		Sum:             it.sum,
-		ZeroThreshold:   it.zThreshold,
-		Schema:          it.schema,
-		PositiveSpans:   it.pSpans,
-		NegativeSpans:   it.nSpans,
-		PositiveBuckets: it.pBuckets,
-		NegativeBuckets: it.nBuckets,
+		CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
+		Count:            it.cnt,
+		ZeroCount:        it.zCnt,
+		Sum:              it.sum,
+		ZeroThreshold:    it.zThreshold,
+		Schema:           it.schema,
+		PositiveSpans:    it.pSpans,
+		NegativeSpans:    it.nSpans,
+		PositiveBuckets:  it.pBuckets,
+		NegativeBuckets:  it.nBuckets,
	}
}

@@ -603,15 +687,16 @@ func (it *histogramIterator) AtFloatHistogram() (int64, *histogram.FloatHistogra
	}
	it.atFloatHistogramCalled = true
	return it.t, &histogram.FloatHistogram{
-		Count:           float64(it.cnt),
-		ZeroCount:       float64(it.zCnt),
-		Sum:             it.sum,
-		ZeroThreshold:   it.zThreshold,
-		Schema:          it.schema,
-		PositiveSpans:   it.pSpans,
-		NegativeSpans:   it.nSpans,
-		PositiveBuckets: it.pFloatBuckets,
-		NegativeBuckets: it.nFloatBuckets,
+		CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
+		Count:            float64(it.cnt),
+		ZeroCount:        float64(it.zCnt),
+		Sum:              it.sum,
+		ZeroThreshold:    it.zThreshold,
+		Schema:           it.schema,
+		PositiveSpans:    it.pSpans,
+		NegativeSpans:    it.nSpans,
+		PositiveBuckets:  it.pFloatBuckets,
+		NegativeBuckets:  it.nFloatBuckets,
	}
}

@@ -630,6 +715,8 @@ func (it *histogramIterator) Reset(b []byte) {
	it.numTotal = binary.BigEndian.Uint16(b)
	it.numRead = 0

+	it.counterResetHeader = CounterResetHeader(b[2] & 0b11000000)
+
	it.t, it.cnt, it.zCnt = 0, 0, 0
	it.tDelta, it.cntDelta, it.zCntDelta = 0, 0, 0

View file

@@ -19,7 +19,10 @@ import (
	"github.com/prometheus/prometheus/model/histogram"
)

-func writeHistogramChunkLayout(b *bstream, schema int32, zeroThreshold float64, positiveSpans, negativeSpans []histogram.Span) {
+func writeHistogramChunkLayout(
+	b *bstream, schema int32, zeroThreshold float64,
+	positiveSpans, negativeSpans []histogram.Span,
+) {
	putZeroThreshold(b, zeroThreshold)
	putVarbitInt(b, int64(schema))
	putHistogramChunkLayoutSpans(b, positiveSpans)

@@ -91,9 +94,7 @@ func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
// putZeroThreshold writes the zero threshold to the bstream. It stores typical
// values in just one byte, but needs 9 bytes for other values. In detail:
-//
-// * If the threshold is 0, store a single zero byte.
-//
+// - If the threshold is 0, store a single zero byte.
// - If the threshold is a power of 2 between (and including) 2^-243 and 2^10,
//   take the exponent from the IEEE 754 representation of the threshold, which
//   covers a range between (and including) -242 and 11. (2^-243 is 0.5*2^-242

@@ -103,7 +104,6 @@ func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
//   threshold. The default value for the zero threshold is 2^-128 (or
//   0.5*2^-127 in IEEE 754 representation) and will therefore be encoded as a
//   single byte (with value 116).
-//
// - In all other cases, store 255 as a single byte, followed by the 8 bytes of
//   the threshold as a float64, i.e. taking 9 bytes in total.
func putZeroThreshold(b *bstream, threshold float64) {
@@ -165,35 +165,37 @@ func (b *bucketIterator) Next() (int, bool) {
	if b.span >= len(b.spans) {
		return 0, false
	}
-try:
-	if b.bucket < int(b.spans[b.span].Length-1) { // Try to move within same span.
+	if b.bucket < int(b.spans[b.span].Length)-1 { // Try to move within same span.
		b.bucket++
		b.idx++
		return b.idx, true
-	} else if b.span < len(b.spans)-1 { // Try to move from one span to the next.
+	}
+
+	for b.span < len(b.spans)-1 { // Try to move from one span to the next.
		b.span++
		b.idx += int(b.spans[b.span].Offset + 1)
		b.bucket = 0
		if b.spans[b.span].Length == 0 {
-			// Pathological case that should never happen. We can't use this span, let's try again.
-			goto try
+			b.idx--
+			continue
		}
		return b.idx, true
	}
+
	// We're out of options.
	return 0, false
}
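For illustration (hypothetical layout): with the spans {offset: 0, length: 2} and {offset: 1, length: 2}, the iterator above yields the absolute bucket indices 0 and 1 for the first span, skips one index for the second span's offset, and then yields 3 and 4.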
-// An Interjection describes how many new buckets have to be introduced before
-// processing the pos'th delta from the original slice.
-type Interjection struct {
+// An Insert describes how many new buckets have to be inserted before
+// processing the pos'th bucket from the original slice.
+type Insert struct {
	pos int
	num int
}

-// compareSpans returns the interjections to convert a slice of deltas to a new
-// slice representing an expanded set of buckets, or false if incompatible
-// (e.g. if buckets were removed).
+// expandSpansForward returns the inserts to expand the bucket spans 'a' so that
+// they match the spans in 'b'. 'b' must cover the same or more buckets than
+// 'a', otherwise the function will return false.
//
// Example:
//

@@ -220,25 +222,25 @@ type Interjection struct {
//	deltas      6 -3 -3 3 -3 0 2 2 1 -5 1
//	delta mods:        / \     / \   / \
//
-// Note that whenever any new buckets are introduced, the subsequent "old"
-// bucket needs to readjust its delta to the new base of 0. Thus, for the caller
-// who wants to transform the set of original deltas to a new set of deltas to
-// match a new span layout that adds buckets, we simply need to generate a list
-// of interjections.
+// Note for histograms with delta-encoded buckets: Whenever any new buckets are
+// introduced, the subsequent "old" bucket needs to readjust its delta to the
+// new base of 0. Thus, for the caller who wants to transform the set of
+// original deltas to a new set of deltas to match a new span layout that adds
+// buckets, we simply need to generate a list of inserts.
//
-// Note: Within compareSpans we don't have to worry about the changes to the
+// Note: Within expandSpansForward we don't have to worry about the changes to the
// spans themselves, thanks to the iterators we get to work with the more useful
// bucket indices (which of course directly correspond to the buckets we have to
// adjust).
-func compareSpans(a, b []histogram.Span) ([]Interjection, bool) {
+func expandSpansForward(a, b []histogram.Span) (forward []Insert, ok bool) {
	ai := newBucketIterator(a)
	bi := newBucketIterator(b)

-	var interjections []Interjection
+	var inserts []Insert

-	// When inter.num becomes > 0, this becomes a valid interjection that
-	// should be yielded when we finish a streak of new buckets.
-	var inter Interjection
+	// When inter.num becomes > 0, this becomes a valid insert that should
+	// be yielded when we finish a streak of new buckets.
+	var inter Insert

	av, aOK := ai.Next()
	bv, bOK := bi.Next()

@@ -248,87 +250,240 @@ loop:
		case aOK && bOK:
			switch {
			case av == bv: // Both have an identical value. move on!
-				// Finish WIP interjection and reset.
+				// Finish WIP insert and reset.
				if inter.num > 0 {
-					interjections = append(interjections, inter)
+					inserts = append(inserts, inter)
				}
				inter.num = 0
				av, aOK = ai.Next()
				bv, bOK = bi.Next()
				inter.pos++
			case av < bv: // b misses a value that is in a.
-				return interjections, false
+				return inserts, false
			case av > bv: // a misses a value that is in b. Forward b and recompare.
				inter.num++
				bv, bOK = bi.Next()
			}
		case aOK && !bOK: // b misses a value that is in a.
-			return interjections, false
+			return inserts, false
		case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
			inter.num++
			bv, bOK = bi.Next()
		default: // Both iterators ran out. We're done.
			if inter.num > 0 {
-				interjections = append(interjections, inter)
+				inserts = append(inserts, inter)
			}
			break loop
		}
	}

-	return interjections, true
+	return inserts, true
}

-// interject merges 'in' with the provided interjections and writes them into
-// 'out', which must already have the appropriate length.
-func interject(in, out []int64, interjections []Interjection) []int64 {
+// expandSpansBothWays is similar to expandSpansForward, but now b may also
+// cover an entirely different set of buckets. The function returns the
+// “forward” inserts to expand 'a' to also cover all the buckets exclusively
+// covered by 'b', and it returns the “backward” inserts to expand 'b' to also
+// cover all the buckets exclusively covered by 'a'.
func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) {
ai := newBucketIterator(a)
bi := newBucketIterator(b)
var fInserts, bInserts []Insert
var lastBucket int
addBucket := func(b int) {
offset := b - lastBucket - 1
if offset == 0 && len(mergedSpans) > 0 {
mergedSpans[len(mergedSpans)-1].Length++
} else {
if len(mergedSpans) == 0 {
offset++
}
mergedSpans = append(mergedSpans, histogram.Span{
Offset: int32(offset),
Length: 1,
})
}
lastBucket = b
}
// When fInter.num (or bInter.num, respectively) becomes > 0, this
// becomes a valid insert that should be yielded when we finish a streak
// of new buckets.
var fInter, bInter Insert
av, aOK := ai.Next()
bv, bOK := bi.Next()
loop:
for {
switch {
case aOK && bOK:
switch {
case av == bv: // Both have an identical value. move on!
// Finish WIP insert and reset.
if fInter.num > 0 {
fInserts = append(fInserts, fInter)
fInter.num = 0
}
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
addBucket(av)
av, aOK = ai.Next()
bv, bOK = bi.Next()
fInter.pos++
bInter.pos++
case av < bv: // b misses a value that is in a.
bInter.num++
// Collect the forward inserts before advancing
// the position of 'a'.
if fInter.num > 0 {
fInserts = append(fInserts, fInter)
fInter.num = 0
}
addBucket(av)
fInter.pos++
av, aOK = ai.Next()
case av > bv: // a misses a value that is in b. Forward b and recompare.
fInter.num++
// Collect the backward inserts before advancing the
// position of 'b'.
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
addBucket(bv)
bInter.pos++
bv, bOK = bi.Next()
}
case aOK && !bOK: // b misses a value that is in a.
bInter.num++
addBucket(av)
av, aOK = ai.Next()
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
fInter.num++
addBucket(bv)
bv, bOK = bi.Next()
default: // Both iterators ran out. We're done.
if fInter.num > 0 {
fInserts = append(fInserts, fInter)
}
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
}
break loop
}
}
return fInserts, bInserts, mergedSpans
}
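A small worked example of the two-way expansion above, with hypothetical layouts (indices are absolute bucket positions):

	a: {offset: 0, length: 2}           -> buckets 0, 1
	b: {offset: 1, length: 2}           -> buckets 1, 2
	forward:  [{pos: 2, num: 1}]        -> 'a' must gain bucket 2
	backward: [{pos: 0, num: 1}]        -> 'b' must gain bucket 0
	merged:   [{offset: 0, length: 3}]  -> buckets 0, 1, 2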
type bucketValue interface {
int64 | float64
}
// insert merges 'in' with the provided inserts and writes them into 'out',
// which must already have the appropriate length. 'out' is also returned for
// convenience.
func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV {
	var (
-		j      int   // Position in out.
-		v      int64 // The last value seen.
-		interj int   // The next interjection to process.
+		oi int // Position in out.
+		v  BV  // The last value seen.
+		ii int // The next insert to process.
	)
	for i, d := range in {
-		if interj < len(interjections) && i == interjections[interj].pos {
-			// We have an interjection!
-			// Add interjection.num new delta values such that their
-			// bucket values equate 0.
-			out[j] = int64(-v)
-			j++
-			for x := 1; x < interjections[interj].num; x++ {
-				out[j] = 0
-				j++
-			}
-			interj++
+		if ii < len(inserts) && i == inserts[ii].pos {
+			// We have an insert!
+			// Add insert.num new delta values such that their
+			// bucket values equate 0. When deltas==false, it means
+			// that it is an absolute value. So we set it to 0
+			// directly.
+			if deltas {
+				out[oi] = -v
+			} else {
+				out[oi] = 0
+			}
+			oi++
+			for x := 1; x < inserts[ii].num; x++ {
+				out[oi] = 0
+				oi++
+			}
+			ii++
			// Now save the value from the input. The delta value we
			// should save is the original delta value + the last
-			// value of the point before the interjection (to undo
-			// the delta that was introduced by the interjection).
-			out[j] = d + v
-			j++
+			// value of the point before the insert (to undo the
+			// delta that was introduced by the insert). When
+			// deltas==false, it means that it is an absolute value,
+			// so we set it directly to the value in the 'in' slice.
+			if deltas {
+				out[oi] = d + v
+			} else {
+				out[oi] = d
+			}
+			oi++
			v = d + v
			continue
		}
-		// If there was no interjection, the original delta is still
-		// valid.
-		out[j] = d
-		j++
+		// If there was no insert, the original delta is still valid.
+		out[oi] = d
+		oi++
		v += d
	}
-	switch interj {
-	case len(interjections):
-		// All interjections processed. Nothing more to do.
-	case len(interjections) - 1:
-		// One more interjection to process at the end.
-		out[j] = int64(-v)
-		j++
-		for x := 1; x < interjections[interj].num; x++ {
-			out[j] = 0
-			j++
+	switch ii {
+	case len(inserts):
+		// All inserts processed. Nothing more to do.
+	case len(inserts) - 1:
+		// One more insert to process at the end.
+		if deltas {
+			out[oi] = -v
+		} else {
+			out[oi] = 0
+		}
+		oi++
+		for x := 1; x < inserts[ii].num; x++ {
+			out[oi] = 0
+			oi++
		}
	default:
-		panic("unprocessed interjections left")
+		panic("unprocessed inserts left")
	}
	return out
}
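A worked example of insert in delta mode (deltas=true), with hypothetical values: absolute buckets [6 3 3] are stored as deltas [6 -3 0]. Applying one insert {pos: 1, num: 1} must yield the absolute buckets [6 0 3 3]:

	in:  [6 -3 0]
	out: [6 -6 3 0]   // -6 drops the new bucket to 0; +3 restores the old value 3.

In absolute mode (deltas=false, used for float histogram buckets), the new bucket is simply written as 0 and the following value is copied unchanged.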
// counterResetHint returns a CounterResetHint based on the CounterResetHeader
// and on the position into the chunk.
func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterResetHint {
switch {
case crh == GaugeType:
// A gauge histogram chunk only contains gauge histograms.
return histogram.GaugeType
case numRead > 1:
// In a counter histogram chunk, there will not be any counter
// resets after the first histogram.
return histogram.NotCounterReset
case crh == CounterReset:
// If the chunk was started because of a counter reset, we can
// safely return that hint. This histogram always has to be
// treated as a counter reset.
return histogram.CounterReset
default:
// Sadly, we have to return "unknown" as the hint for all other
// cases, even if we know that the chunk was started without a
// counter reset. But we cannot be sure that the previous chunk
// still exists in the TSDB, so we conservatively return
// "unknown". On the bright side, this case should be relatively
// rare.
//
// TODO(beorn7): Nevertheless, if the current chunk is in the
// middle of a block (not the first chunk in the block for this
// series), it's probably safe to assume that the previous chunk
// will exist in the TSDB for as long as the current chunk
// exist, and we could safely return
// "histogram.NotCounterReset". This needs some more work and
// might not be worth the effort and/or risk. To be vetted...
return histogram.UnknownCounterReset
}
}
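Illustrative outcomes, using the CounterResetHeader values shown earlier in this commit:

	counterResetHint(GaugeType, 5)           == histogram.GaugeType
	counterResetHint(NotCounterReset, 3)     == histogram.NotCounterReset  // numRead > 1 wins.
	counterResetHint(CounterReset, 1)        == histogram.CounterReset
	counterResetHint(UnknownCounterReset, 1) == histogram.UnknownCounterReset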

View file

@@ -156,6 +156,10 @@ func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) {
	panic("appended a histogram to an xor chunk")
}

+func (a *xorAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) {
+	panic("appended a float histogram to an xor chunk")
+}
+
func (a *xorAppender) Append(t int64, v float64) {
	var tDelta uint64
	num := binary.BigEndian.Uint16(a.b.bytes())

View file

@@ -746,8 +746,9 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
	}

	var (
		ref      = storage.SeriesRef(0)
		chks     []chunks.Meta
+		chksIter chunks.Iterator
	)

	set := sets[0]

@@ -765,7 +766,7 @@
		default:
		}
		s := set.At()
-		chksIter := s.Iterator()
+		chksIter = s.Iterator(chksIter)
		chks = chks[:0]
		for chksIter.Next() {
			// We are not iterating in streaming way over chunk as

View file

@@ -1002,7 +1002,7 @@ func (a dbAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef,
	if g, ok := a.Appender.(storage.GetRef); ok {
		return g.GetRef(lset, hash)
	}
-	return 0, nil
+	return 0, labels.EmptyLabels()
}

func (a dbAppender) Commit() error {

View file

@@ -226,13 +226,16 @@ func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemp
	// Exemplar label length does not include chars involved in text rendering such as quotes
	// equals sign, or commas. See definition of const ExemplarMaxLabelLength.
	labelSetLen := 0
-	for _, l := range e.Labels {
+	if err := e.Labels.Validate(func(l labels.Label) error {
		labelSetLen += utf8.RuneCountInString(l.Name)
		labelSetLen += utf8.RuneCountInString(l.Value)

		if labelSetLen > exemplar.ExemplarMaxLabelSetLength {
			return storage.ErrExemplarLabelLength
		}
+		return nil
+	}); err != nil {
+		return err
	}

	idx, ok := ce.index[string(key)]

View file

@@ -17,6 +17,7 @@ import (
	"fmt"
	"io"
	"math"
+	"math/rand"
	"path/filepath"
	"sync"
	"time"

@@ -74,19 +75,20 @@ type Head struct {
	// This should be typecasted to chunks.ChunkDiskMapperRef after loading.
	minOOOMmapRef atomic.Uint64

	metrics         *headMetrics
	opts            *HeadOptions
	wal, wbl        *wlog.WL
	exemplarMetrics *ExemplarMetrics
	exemplars       ExemplarStorage
	logger          log.Logger

	appendPool          sync.Pool
	exemplarsPool       sync.Pool
	histogramsPool      sync.Pool
+	floatHistogramsPool sync.Pool
	metadataPool        sync.Pool
	seriesPool          sync.Pool
	bytesPool           sync.Pool
	memChunkPool        sync.Pool

	// All series addressable by their ID or hash.
	series *stripeSeries
@@ -666,7 +668,7 @@ func (h *Head) Init(minValidTime int64) error {
			offset = snapOffset
		}
		sr, err := wlog.NewSegmentBufReaderWithOffset(offset, s)
-		if errors.Cause(err) == io.EOF {
+		if errors.Is(err, io.EOF) {
			// File does not exist.
			continue
		}

@@ -761,7 +763,11 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
			h.metrics.chunks.Inc()
			h.metrics.chunksCreated.Inc()
-			ms.oooMmappedChunks = append(ms.oooMmappedChunks, &mmappedChunk{
+			if ms.ooo == nil {
+				ms.ooo = &memSeriesOOOFields{}
+			}
+			ms.ooo.oooMmappedChunks = append(ms.ooo.oooMmappedChunks, &mmappedChunk{
				ref:     chunkRef,
				minTime: mint,
				maxTime: maxt,
@@ -1664,24 +1670,24 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
					minMmapFile = seq
				}
			}
-			if len(series.oooMmappedChunks) > 0 {
-				seq, _ := series.oooMmappedChunks[0].ref.Unpack()
+			if series.ooo != nil && len(series.ooo.oooMmappedChunks) > 0 {
+				seq, _ := series.ooo.oooMmappedChunks[0].ref.Unpack()
				if seq < minMmapFile {
					minMmapFile = seq
				}
-				for _, ch := range series.oooMmappedChunks {
+				for _, ch := range series.ooo.oooMmappedChunks {
					if ch.minTime < minOOOTime {
						minOOOTime = ch.minTime
					}
				}
			}
-			if series.oooHeadChunk != nil {
-				if series.oooHeadChunk.minTime < minOOOTime {
-					minOOOTime = series.oooHeadChunk.minTime
+			if series.ooo != nil && series.ooo.oooHeadChunk != nil {
+				if series.ooo.oooHeadChunk.minTime < minOOOTime {
+					minOOOTime = series.ooo.oooHeadChunk.minTime
				}
			}
-			if len(series.mmappedChunks) > 0 || len(series.oooMmappedChunks) > 0 ||
-				series.headChunk != nil || series.oooHeadChunk != nil || series.pendingCommit {
+			if len(series.mmappedChunks) > 0 || series.headChunk != nil || series.pendingCommit ||
+				(series.ooo != nil && (len(series.ooo.oooMmappedChunks) > 0 || series.ooo.oooHeadChunk != nil)) {
				seriesMint := series.minTime()
				if seriesMint < actualMint {
					actualMint = seriesMint
@@ -1838,9 +1844,7 @@ type memSeries struct {
	headChunk    *memChunk          // Most recent chunk in memory that's still being built.
	firstChunkID chunks.HeadChunkID // HeadChunkID for mmappedChunks[0]

-	oooMmappedChunks []*mmappedChunk    // Immutable chunks on disk containing OOO samples.
-	oooHeadChunk     *oooHeadChunk      // Most recent chunk for ooo samples in memory that's still being built.
-	firstOOOChunkID  chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0]
+	ooo *memSeriesOOOFields

	mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay.

@@ -1850,7 +1854,8 @@ type memSeries struct {
	lastValue float64

	// We keep the last histogram value here (in addition to appending it to the chunk) so we can check for duplicates.
	lastHistogramValue *histogram.Histogram
+	lastFloatHistogramValue *histogram.FloatHistogram

	// Current appender for the head chunk. Set when a new head chunk is cut.
	// It is nil only if headChunk is nil. E.g. if there was an appender that created a new series, but rolled back the commit

@@ -1860,13 +1865,17 @@ type memSeries struct {
	// txs is nil if isolation is disabled.
	txs *txRing

-	// TODO(beorn7): The only reason we track this is to create a staleness
-	// marker as either histogram or float sample. Perhaps there is a better way.
-	isHistogramSeries bool

	pendingCommit bool // Whether there are samples waiting to be committed to this series.
}
// memSeriesOOOFields contains the fields required by memSeries
// to handle out-of-order data.
type memSeriesOOOFields struct {
oooMmappedChunks []*mmappedChunk // Immutable chunks on disk containing OOO samples.
oooHeadChunk *oooHeadChunk // Most recent chunk for ooo samples in memory that's still being built.
firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0].
}
func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, isolationDisabled bool) *memSeries {
	s := &memSeries{
		lset: lset,

@@ -1925,15 +1934,19 @@ func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkD
	}

	var removedOOO int
-	if len(s.oooMmappedChunks) > 0 {
-		for i, c := range s.oooMmappedChunks {
+	if s.ooo != nil && len(s.ooo.oooMmappedChunks) > 0 {
+		for i, c := range s.ooo.oooMmappedChunks {
			if c.ref.GreaterThan(minOOOMmapRef) {
				break
			}
			removedOOO = i + 1
		}
-		s.oooMmappedChunks = append(s.oooMmappedChunks[:0], s.oooMmappedChunks[removedOOO:]...)
-		s.firstOOOChunkID += chunks.HeadChunkID(removedOOO)
+		s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks[:0], s.ooo.oooMmappedChunks[removedOOO:]...)
+		s.ooo.firstOOOChunkID += chunks.HeadChunkID(removedOOO)
+		if len(s.ooo.oooMmappedChunks) == 0 && s.ooo.oooHeadChunk == nil {
+			s.ooo = nil
+		}
	}

	return removedInOrder + removedOOO
@@ -2027,8 +2040,8 @@ func (h *Head) updateWALReplayStatusRead(current int) {
func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
	for i := 0; i < n; i++ {
-		r = append(r, &histogram.Histogram{
-			Count:         5 + uint64(i*4),
+		h := histogram.Histogram{
+			Count:         10 + uint64(i*8),
			ZeroCount:     2 + uint64(i),
			ZeroThreshold: 0.001,
			Sum:           18.4 * float64(i+1),

@@ -2038,6 +2051,93 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
				{Offset: 1, Length: 2},
			},
			PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
+			NegativeSpans: []histogram.Span{
+				{Offset: 0, Length: 2},
+				{Offset: 1, Length: 2},
+			},
+			NegativeBuckets: []int64{int64(i + 1), 1, -1, 0},
+		}
+		if i > 0 {
+			h.CounterResetHint = histogram.NotCounterReset
+		}
+		r = append(r, &h)
+	}
+	return r
+}
func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) {
for x := 0; x < n; x++ {
i := rand.Intn(n)
r = append(r, &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 10 + uint64(i*8),
ZeroCount: 2 + uint64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{int64(i + 1), 1, -1, 0},
})
}
return r
}
func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) {
for i := 0; i < n; i++ {
h := histogram.FloatHistogram{
Count: 10 + float64(i*8),
ZeroCount: 2 + float64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
}
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
}
r = append(r, &h)
}
return r
}
func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) {
for x := 0; x < n; x++ {
i := rand.Intn(n)
r = append(r, &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 10 + float64(i*8),
ZeroCount: 2 + float64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
		})
	}

View file

@@ -68,14 +68,14 @@ func (a *initAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e
	return a.app.AppendExemplar(ref, l, e)
}

-func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
+func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if a.app != nil {
-		return a.app.AppendHistogram(ref, l, t, h)
+		return a.app.AppendHistogram(ref, l, t, h, fh)
	}
	a.head.initTime(t)
	a.app = a.head.appender()

-	return a.app.AppendHistogram(ref, l, t, h)
+	return a.app.AppendHistogram(ref, l, t, h, fh)
}

func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {

@@ -102,7 +102,7 @@ func (a *initAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRe
	if g, ok := a.app.(storage.GetRef); ok {
		return g.GetRef(lset, hash)
	}
-	return 0, nil
+	return 0, labels.EmptyLabels()
}

func (a *initAppender) Commit() error {
@@ -156,6 +156,7 @@ func (h *Head) appender() *headAppender {
		sampleSeries:    h.getSeriesBuffer(),
		exemplars:       exemplarsBuf,
		histograms:      h.getHistogramBuffer(),
+		floatHistograms: h.getFloatHistogramBuffer(),
		metadata:        h.getMetadataBuffer(),

		appendID:              appendID,
		cleanupAppendIDsBelow: cleanupAppendIDsBelow,

@@ -236,6 +237,19 @@ func (h *Head) putHistogramBuffer(b []record.RefHistogramSample) {
	h.histogramsPool.Put(b[:0])
}
func (h *Head) getFloatHistogramBuffer() []record.RefFloatHistogramSample {
b := h.floatHistogramsPool.Get()
if b == nil {
return make([]record.RefFloatHistogramSample, 0, 512)
}
return b.([]record.RefFloatHistogramSample)
}
func (h *Head) putFloatHistogramBuffer(b []record.RefFloatHistogramSample) {
//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
h.floatHistogramsPool.Put(b[:0])
}
func (h *Head) getMetadataBuffer() []record.RefMetadata {
	b := h.metadataPool.Get()
	if b == nil {

@@ -287,14 +301,16 @@ type headAppender struct {
	headMaxt      int64 // We track it here to not take the lock for every sample appended.
	oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.

	series               []record.RefSeries          // New series held by this appender.
-	samples              []record.RefSample          // New float samples held by this appender.
-	exemplars            []exemplarWithSeriesRef     // New exemplars held by this appender.
-	sampleSeries         []*memSeries                // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
-	histograms           []record.RefHistogramSample // New histogram samples held by this appender.
-	histogramSeries      []*memSeries                // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
-	metadata             []record.RefMetadata        // New metadata held by this appender.
-	metadataSeries       []*memSeries                // Series corresponding to the metadata held by this appender.
+	samples              []record.RefSample               // New float samples held by this appender.
+	sampleSeries         []*memSeries                     // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
+	histograms           []record.RefHistogramSample      // New histogram samples held by this appender.
+	histogramSeries      []*memSeries                     // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
+	floatHistograms      []record.RefFloatHistogramSample // New float histogram samples held by this appender.
+	floatHistogramSeries []*memSeries                     // FloatHistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
+	metadata             []record.RefMetadata             // New metadata held by this appender.
+	metadataSeries       []*memSeries                     // Series corresponding to the metadata held by this appender.
+	exemplars            []exemplarWithSeriesRef          // New exemplars held by this appender.

	appendID, cleanupAppendIDsBelow uint64
	closed                          bool
@@ -312,7 +328,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
	if s == nil {
		// Ensure no empty labels have gotten through.
		lset = lset.WithoutEmpty()
-		if len(lset) == 0 {
+		if lset.IsEmpty() {
			return 0, errors.Wrap(ErrInvalidSample, "empty labelset")
		}

@@ -334,8 +350,12 @@
		}
	}

-	if value.IsStaleNaN(v) && s.isHistogramSeries {
-		return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v})
+	if value.IsStaleNaN(v) {
+		if s.lastHistogramValue != nil {
+			return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
+		} else if s.lastFloatHistogramValue != nil {
+			return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
+		}
	}

	s.Lock()
@@ -439,6 +459,28 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error {
	return nil
}
// appendableFloatHistogram checks whether the given sample is valid for appending to the series.
func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram) error {
c := s.head()
if c == nil {
return nil
}
if t > c.maxTime {
return nil
}
if t < c.maxTime {
return storage.ErrOutOfOrderSample
}
// We are allowing exact duplicates as we can encounter them in valid cases
// like federation and erroring out at that time would be extremely noisy.
if !fh.Equals(s.lastFloatHistogramValue) {
return storage.ErrDuplicateSampleForTimestamp
}
return nil
}
// AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't // AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't
// use getOrCreate or make any of the lset validity checks that Append does. // use getOrCreate or make any of the lset validity checks that Append does.
func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
@ -476,7 +518,7 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels,
return storage.SeriesRef(s.ref), nil return storage.SeriesRef(s.ref), nil
} }
func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if !a.head.opts.EnableNativeHistograms.Load() { if !a.head.opts.EnableNativeHistograms.Load() {
return 0, storage.ErrNativeHistogramsDisabled return 0, storage.ErrNativeHistogramsDisabled
} }
@@ -486,15 +528,23 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
return 0, storage.ErrOutOfBounds
}
-if err := ValidateHistogram(h); err != nil {
-return 0, err
+if h != nil {
+if err := ValidateHistogram(h); err != nil {
+return 0, err
+}
+}
+if fh != nil {
+if err := ValidateFloatHistogram(fh); err != nil {
+return 0, err
+}
}
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
// Ensure no empty labels have gotten through.
lset = lset.WithoutEmpty()
-if len(lset) == 0 {
+if lset.IsEmpty() {
return 0, errors.Wrap(ErrInvalidSample, "empty labelset")
}
@@ -508,8 +558,12 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
if err != nil {
return 0, err
}
-s.isHistogramSeries = true
if created {
+if h != nil {
+s.lastHistogramValue = &histogram.Histogram{}
+} else if fh != nil {
+s.lastFloatHistogramValue = &histogram.FloatHistogram{}
+}
a.series = append(a.series, record.RefSeries{
Ref: s.ref,
Labels: lset,
@@ -517,16 +571,41 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
}
}
-s.Lock()
-if err := s.appendableHistogram(t, h); err != nil {
-s.Unlock()
-if err == storage.ErrOutOfOrderSample {
-a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
-}
-return 0, err
+if h != nil {
+s.Lock()
+if err := s.appendableHistogram(t, h); err != nil {
+s.Unlock()
+if err == storage.ErrOutOfOrderSample {
+a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
+}
+return 0, err
+}
+s.pendingCommit = true
+s.Unlock()
+a.histograms = append(a.histograms, record.RefHistogramSample{
+Ref: s.ref,
+T: t,
+H: h,
+})
+a.histogramSeries = append(a.histogramSeries, s)
+} else if fh != nil {
+s.Lock()
+if err := s.appendableFloatHistogram(t, fh); err != nil {
+s.Unlock()
+if err == storage.ErrOutOfOrderSample {
+a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
+}
+return 0, err
+}
+s.pendingCommit = true
+s.Unlock()
+a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{
+Ref: s.ref,
+T: t,
+FH: fh,
+})
+a.floatHistogramSeries = append(a.floatHistogramSeries, s)
}
-s.pendingCommit = true
-s.Unlock()
if t < a.mint {
a.mint = t
@@ -535,12 +614,6 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
a.maxt = t
}
-a.histograms = append(a.histograms, record.RefHistogramSample{
-Ref: s.ref,
-T: t,
-H: h,
-})
-a.histogramSeries = append(a.histogramSeries, s)
return storage.SeriesRef(s.ref), nil
}
@@ -582,17 +655,17 @@ func ValidateHistogram(h *histogram.Histogram) error {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return errors.Wrap(err, "positive side")
}
+var nCount, pCount uint64
-negativeCount, err := checkHistogramBuckets(h.NegativeBuckets)
+err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
if err != nil {
return errors.Wrap(err, "negative side")
}
-positiveCount, err := checkHistogramBuckets(h.PositiveBuckets)
+err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
if err != nil {
return errors.Wrap(err, "positive side")
}
-if c := negativeCount + positiveCount; c > h.Count {
+if c := nCount + pCount; c > h.Count {
return errors.Wrap(
storage.ErrHistogramCountNotBigEnough,
fmt.Sprintf("%d observations found in buckets, but the Count field is %d", c, h.Count),
@@ -602,6 +675,33 @@ func ValidateHistogram(h *histogram.Histogram) error {
return nil
}
func ValidateFloatHistogram(h *histogram.FloatHistogram) error {
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return errors.Wrap(err, "negative side")
}
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return errors.Wrap(err, "positive side")
}
var nCount, pCount float64
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
if err != nil {
return errors.Wrap(err, "negative side")
}
err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
if err != nil {
return errors.Wrap(err, "positive side")
}
if c := nCount + pCount; c > h.Count {
return errors.Wrap(
storage.ErrHistogramCountNotBigEnough,
fmt.Sprintf("%f observations found in buckets, but the Count field is %f", c, h.Count),
)
}
return nil
}
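ValidateFloatHistogram leans on checkHistogramSpans to guarantee that the span layout and the bucket array agree. A self-contained illustration of that invariant, under the assumption that the real check requires non-negative offsets after the first span and span lengths summing to the bucket count (names are stand-ins):

package main

import "fmt"

type span struct {
	Offset int32
	Length uint32
}

// checkSpans verifies the layout invariant: only the first span may have a
// negative offset, and the span lengths must cover every bucket value.
func checkSpans(spans []span, numBuckets int) error {
	var total int
	for i, s := range spans {
		if i > 0 && s.Offset < 0 {
			return fmt.Errorf("span %d has negative offset %d", i, s.Offset)
		}
		total += int(s.Length)
	}
	if total != numBuckets {
		return fmt.Errorf("spans cover %d buckets, got %d bucket values", total, numBuckets)
	}
	return nil
}

func main() {
	spans := []span{{Offset: 0, Length: 2}, {Offset: 3, Length: 1}}
	fmt.Println(checkSpans(spans, 3)) // <nil>
	fmt.Println(checkSpans(spans, 4)) // error: spans cover 3 buckets, got 4 bucket values
}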
func checkHistogramSpans(spans []histogram.Span, numBuckets int) error {
var spanBuckets int
for n, span := range spans {
@@ -622,27 +722,30 @@ func checkHistogramSpans(spans []histogram.Span, numBuckets int) error {
return nil
}
-func checkHistogramBuckets(buckets []int64) (uint64, error) {
+func checkHistogramBuckets[BC histogram.BucketCount, IBC histogram.InternalBucketCount](buckets []IBC, count *BC, deltas bool) error {
if len(buckets) == 0 {
-return 0, nil
+return nil
}
-var count uint64
-var last int64
+var last IBC
for i := 0; i < len(buckets); i++ {
-c := last + buckets[i]
+var c IBC
+if deltas {
+c = last + buckets[i]
+} else {
+c = buckets[i]
+}
if c < 0 {
-return 0, errors.Wrap(
+return errors.Wrap(
storage.ErrHistogramNegativeBucketCount,
-fmt.Sprintf("bucket number %d has observation count of %d", i+1, c),
+fmt.Sprintf("bucket number %d has observation count of %v", i+1, c),
)
}
last = c
-count += uint64(c)
+*count += BC(c)
}
-return count, nil
+return nil
}
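The generic checkHistogramBuckets now handles both encodings: integer histograms carry bucket counts as deltas from the previous bucket, while float histograms carry absolute counts, so one type-parameterised walker totals both. A minimal sketch of the same pattern with illustrative names:

package main

import "fmt"

type internalCount interface{ ~int64 | ~float64 }

// totalBuckets sums bucket observation counts, decoding deltas when asked,
// and rejects any bucket whose decoded count is negative.
func totalBuckets[IBC internalCount](buckets []IBC, deltas bool) (IBC, error) {
	var total, last IBC
	for i, b := range buckets {
		c := b
		if deltas {
			c = last + b
		}
		if c < 0 {
			return 0, fmt.Errorf("bucket %d has negative count %v", i+1, c)
		}
		last = c
		total += c
	}
	return total, nil
}

func main() {
	n, _ := totalBuckets([]int64{2, 1, -2}, true) // deltas decode to 2, 3, 1
	fmt.Println(n)                                // 6
	f, _ := totalBuckets([]float64{2, 3, 1}, false)
	fmt.Println(f) // 6
}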
var _ storage.GetRef = &headAppender{}
@@ -650,7 +753,7 @@ var _ storage.GetRef = &headAppender{}
func (a *headAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
s := a.head.series.getByHash(hash, lset)
if s == nil {
-return 0, nil
+return 0, labels.EmptyLabels()
}
// returned labels must be suitable to pass to Append()
return storage.SeriesRef(s.ref), s.lset
@@ -707,6 +810,13 @@ func (a *headAppender) log() error {
return errors.Wrap(err, "log histograms")
}
}
if len(a.floatHistograms) > 0 {
rec = enc.FloatHistogramSamples(a.floatHistograms, buf)
buf = rec[:0]
if err := a.head.wal.Log(rec); err != nil {
return errors.Wrap(err, "log float histograms")
}
}
return nil
}
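The log path above encodes each record kind into a reusable buffer and resets the slice to length zero after every write so capacity is recycled across record types. A rough sketch of that buffer-reuse pattern (the wal type here is a stand-in, not the vendored WAL):

package main

import "fmt"

type wal struct{ logged int }

func (w *wal) Log(rec []byte) error { w.logged++; return nil }

// encode appends a payload into the shared backing array.
func encode(payload string, buf []byte) []byte {
	return append(buf, payload...)
}

func main() {
	w := &wal{}
	var buf []byte
	for _, p := range []string{"samples", "histograms", "float histograms"} {
		rec := encode(p, buf)
		buf = rec[:0] // keep capacity, drop contents for the next record
		if err := w.Log(rec); err != nil {
			panic(err)
		}
	}
	fmt.Println(w.logged) // 3
}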
@@ -753,6 +863,7 @@ func (a *headAppender) Commit() (err error) {
defer a.head.putSeriesBuffer(a.sampleSeries)
defer a.head.putExemplarBuffer(a.exemplars)
defer a.head.putHistogramBuffer(a.histograms)
+defer a.head.putFloatHistogramBuffer(a.floatHistograms)
defer a.head.putMetadataBuffer(a.metadata)
defer a.head.iso.closeAppend(a.appendID)
@@ -924,6 +1035,32 @@ func (a *headAppender) Commit() (err error) {
}
}
histogramsTotal += len(a.floatHistograms)
for i, s := range a.floatHistograms {
series = a.floatHistogramSeries[i]
series.Lock()
ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, a.head.chunkDiskMapper, chunkRange)
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false
series.Unlock()
if ok {
if s.T < inOrderMint {
inOrderMint = s.T
}
if s.T > inOrderMaxt {
inOrderMaxt = s.T
}
} else {
histogramsTotal--
histoOOORejected++
}
if chunkCreated {
a.head.metrics.chunks.Inc()
a.head.metrics.chunksCreated.Inc()
}
}
for i, m := range a.metadata {
series = a.metadataSeries[i]
series.Lock()
@@ -956,7 +1093,10 @@ func (a *headAppender) Commit() (err error) {
// insert is like append, except it inserts. Used for OOO samples.
func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRef chunks.ChunkDiskMapperRef) {
-c := s.oooHeadChunk
+if s.ooo == nil {
+s.ooo = &memSeriesOOOFields{}
+}
+c := s.ooo.oooHeadChunk
if c == nil || c.chunk.NumSamples() == int(oooCapMax) {
// Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
c, mmapRef = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
@@ -985,11 +1125,12 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper
return sampleInOrder, chunkCreated
}
s.app.Append(t, v)
-s.isHistogramSeries = false
c.maxTime = t
s.lastValue = v
+s.lastHistogramValue = nil
+s.lastFloatHistogramValue = nil
if appendID > 0 {
s.txs.add(appendID)
@@ -1002,39 +1143,60 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
// Head controls the execution of recoding, so that we own the proper
-// chunk reference afterwards. We check for Appendable before
+// chunk reference afterwards. We check for Appendable from appender before
// appendPreprocessor because in case it ends up creating a new chunk,
// we need to know if there was also a counter reset or not to set the
// meta properly.
app, _ := s.app.(*chunkenc.HistogramAppender)
var (
-positiveInterjections, negativeInterjections []chunkenc.Interjection
-okToAppend, counterReset bool
+pForwardInserts, nForwardInserts []chunkenc.Insert
+pBackwardInserts, nBackwardInserts []chunkenc.Insert
+pMergedSpans, nMergedSpans []histogram.Span
+okToAppend, counterReset, gauge bool
)
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
if !sampleInOrder {
return sampleInOrder, chunkCreated
}
-if app != nil {
-positiveInterjections, negativeInterjections, okToAppend, counterReset = app.Appendable(h)
+switch h.CounterResetHint {
+case histogram.GaugeType:
+gauge = true
+if app != nil {
+pForwardInserts, nForwardInserts,
+pBackwardInserts, nBackwardInserts,
+pMergedSpans, nMergedSpans,
+okToAppend = app.AppendableGauge(h)
+}
+case histogram.CounterReset:
+// The caller tells us this is a counter reset, even if it
+// doesn't look like one.
+counterReset = true
+default:
+if app != nil {
+pForwardInserts, nForwardInserts, okToAppend, counterReset = app.Appendable(h)
+}
}
if !chunkCreated {
+if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
+h.PositiveSpans = pMergedSpans
+h.NegativeSpans = nMergedSpans
+app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts)
+}
// We have 3 cases here
// - !okToAppend -> We need to cut a new chunk.
-// - okToAppend but we have interjections → Existing chunk needs
+// - okToAppend but we have inserts → Existing chunk needs
// recoding before we can append our histogram.
-// - okToAppend and no interjections → Chunk is ready to support our histogram.
+// - okToAppend and no inserts → Chunk is ready to support our histogram.
if !okToAppend || counterReset {
c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
chunkCreated = true
-} else if len(positiveInterjections) > 0 || len(negativeInterjections) > 0 {
+} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
// New buckets have appeared. We need to recode all
// prior histogram samples within the chunk before we
// can process this one.
chunk, app := app.Recode(
-positiveInterjections, negativeInterjections,
+pForwardInserts, nForwardInserts,
h.PositiveSpans, h.NegativeSpans,
)
c.chunk = chunk
@@ -1045,20 +1207,116 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
if chunkCreated {
hc := s.headChunk.chunk.(*chunkenc.HistogramChunk)
header := chunkenc.UnknownCounterReset
-if counterReset {
+switch {
+case gauge:
+header = chunkenc.GaugeType
+case counterReset:
header = chunkenc.CounterReset
-} else if okToAppend {
+case okToAppend:
header = chunkenc.NotCounterReset
}
hc.SetCounterResetHeader(header)
}
s.app.AppendHistogram(t, h)
-s.isHistogramSeries = true
c.maxTime = t
s.lastHistogramValue = h
+s.lastFloatHistogramValue = nil
if appendID > 0 {
s.txs.add(appendID)
}
return true, chunkCreated
}
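When a new chunk is cut, the counter-reset header is derived from what the appender learned: gauge wins, then an explicit counter reset, then plain ok-to-append. A small illustrative mapping (the constants are stand-ins for the chunkenc ones):

package main

import "fmt"

type resetHeader int

const (
	unknownCounterReset resetHeader = iota
	counterReset
	notCounterReset
	gaugeType
)

// headerFor picks the header for a freshly cut histogram chunk, mirroring
// the precedence of the switch in appendHistogram above.
func headerFor(gauge, reset, okToAppend bool) resetHeader {
	switch {
	case gauge:
		return gaugeType
	case reset:
		return counterReset
	case okToAppend:
		return notCounterReset
	}
	return unknownCounterReset
}

func main() {
	fmt.Println(headerFor(false, true, false)) // 1 (counter reset)
	fmt.Println(headerFor(true, false, true))  // 3 (gauge takes precedence)
}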
// appendFloatHistogram adds the float histogram.
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
// Head controls the execution of recoding, so that we own the proper
// chunk reference afterwards. We check for Appendable from appender before
// appendPreprocessor because in case it ends up creating a new chunk,
// we need to know if there was also a counter reset or not to set the
// meta properly.
app, _ := s.app.(*chunkenc.FloatHistogramAppender)
var (
pForwardInserts, nForwardInserts []chunkenc.Insert
pBackwardInserts, nBackwardInserts []chunkenc.Insert
pMergedSpans, nMergedSpans []histogram.Span
okToAppend, counterReset, gauge bool
)
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
if !sampleInOrder {
return sampleInOrder, chunkCreated
}
switch fh.CounterResetHint {
case histogram.GaugeType:
gauge = true
if app != nil {
pForwardInserts, nForwardInserts,
pBackwardInserts, nBackwardInserts,
pMergedSpans, nMergedSpans,
okToAppend = app.AppendableGauge(fh)
}
case histogram.CounterReset:
// The caller tells us this is a counter reset, even if it
// doesn't look like one.
counterReset = true
default:
if app != nil {
pForwardInserts, nForwardInserts, okToAppend, counterReset = app.Appendable(fh)
}
}
if !chunkCreated {
if len(pBackwardInserts)+len(nBackwardInserts) > 0 {
fh.PositiveSpans = pMergedSpans
fh.NegativeSpans = nMergedSpans
app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts)
}
// We have 3 cases here
// - !okToAppend -> We need to cut a new chunk.
// - okToAppend but we have inserts → Existing chunk needs
// recoding before we can append our histogram.
// - okToAppend and no inserts → Chunk is ready to support our histogram.
if !okToAppend || counterReset {
c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
chunkCreated = true
} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
// New buckets have appeared. We need to recode all
// prior histogram samples within the chunk before we
// can process this one.
chunk, app := app.Recode(
pForwardInserts, nForwardInserts,
fh.PositiveSpans, fh.NegativeSpans,
)
c.chunk = chunk
s.app = app
}
}
if chunkCreated {
hc := s.headChunk.chunk.(*chunkenc.FloatHistogramChunk)
header := chunkenc.UnknownCounterReset
switch {
case gauge:
header = chunkenc.GaugeType
case counterReset:
header = chunkenc.CounterReset
case okToAppend:
header = chunkenc.NotCounterReset
}
hc.SetCounterResetHeader(header)
}
s.app.AppendFloatHistogram(t, fh)
c.maxTime = t
s.lastFloatHistogramValue = fh
s.lastHistogramValue = nil
if appendID > 0 {
s.txs.add(appendID)
@@ -1175,33 +1433,35 @@ func (s *memSeries) cutNewHeadChunk(
return s.headChunk
}
// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) {
ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)
-s.oooHeadChunk = &oooHeadChunk{
+s.ooo.oooHeadChunk = &oooHeadChunk{
chunk: NewOOOChunk(),
minTime: mint,
maxTime: math.MinInt64,
}
-return s.oooHeadChunk, ref
+return s.ooo.oooHeadChunk, ref
}
func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) chunks.ChunkDiskMapperRef {
-if s.oooHeadChunk == nil {
+if s.ooo == nil || s.ooo.oooHeadChunk == nil {
// There is no head chunk, so nothing to m-map here.
return 0
}
-xor, _ := s.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
+xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
oooXor := &chunkenc.OOOXORChunk{XORChunk: xor}
-chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.oooHeadChunk.minTime, s.oooHeadChunk.maxTime, oooXor, handleChunkWriteError)
-s.oooMmappedChunks = append(s.oooMmappedChunks, &mmappedChunk{
+chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, oooXor, handleChunkWriteError)
+s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
ref: chunkRef,
numSamples: uint16(xor.NumSamples()),
-minTime: s.oooHeadChunk.minTime,
-maxTime: s.oooHeadChunk.maxTime,
+minTime: s.ooo.oooHeadChunk.minTime,
+maxTime: s.ooo.oooHeadChunk.maxTime,
})
-s.oooHeadChunk = nil
+s.ooo.oooHeadChunk = nil
return chunkRef
}
@@ -1254,6 +1514,7 @@ func (a *headAppender) Rollback() (err error) {
a.head.putAppendBuffer(a.samples)
a.head.putExemplarBuffer(a.exemplars)
a.head.putHistogramBuffer(a.histograms)
+a.head.putFloatHistogramBuffer(a.floatHistograms)
a.head.putMetadataBuffer(a.metadata)
a.samples = nil
a.exemplars = nil


@@ -113,7 +113,9 @@ func (h *headIndexReader) Postings(name string, values ...string) (index.Posting
default:
res := make([]index.Postings, 0, len(values))
for _, value := range values {
-res = append(res, h.head.postings.Get(name, value))
+if p := h.head.postings.Get(name, value); !index.IsEmptyPostingsType(p) {
+res = append(res, p)
+}
}
return index.Merge(res...), nil
}
@@ -148,14 +150,14 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
}
// Series returns the series for the given reference.
-func (h *headIndexReader) Series(ref storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta) error {
+func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
s := h.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
h.head.metrics.seriesNotFound.Inc()
return storage.ErrNotFound
}
-*lbls = append((*lbls)[:0], s.lset...)
+builder.Assign(s.lset)
s.Lock()
defer s.Unlock()
@@ -194,8 +196,9 @@ func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID {
// oooHeadChunkID returns the HeadChunkID referred to by the given position.
// * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos]
// * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk
+// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID {
-return chunks.HeadChunkID(pos) + s.firstOOOChunkID
+return chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID
}
// LabelValueFor returns label value for the given label name in the series referred to by ID.
@@ -222,9 +225,9 @@ func (h *headIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, err
if memSeries == nil {
return nil, storage.ErrNotFound
}
-for _, lbl := range memSeries.lset {
+memSeries.lset.Range(func(lbl labels.Label) {
namesMap[lbl.Name] = struct{}{}
-}
+})
}
names := make([]string, 0, len(namesMap))
for name := range namesMap {
@@ -347,6 +350,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
// might be a merge of all the overlapping chunks, if any, amongst all the
// chunks in the OOOHead.
// This function is not thread safe unless the caller holds a lock.
+// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (chunk *mergedOOOChunks, err error) {
_, cid := chunks.HeadChunkRef(meta.Ref).Unpack()
@@ -354,23 +358,23 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
// incremented by 1 when new chunk is created, hence (meta - firstChunkID) gives the slice index.
// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
// is len(s.mmappedChunks), it represents the next chunk, which is the head chunk.
-ix := int(cid) - int(s.firstOOOChunkID)
-if ix < 0 || ix > len(s.oooMmappedChunks) {
+ix := int(cid) - int(s.ooo.firstOOOChunkID)
+if ix < 0 || ix > len(s.ooo.oooMmappedChunks) {
return nil, storage.ErrNotFound
}
-if ix == len(s.oooMmappedChunks) {
-if s.oooHeadChunk == nil {
+if ix == len(s.ooo.oooMmappedChunks) {
+if s.ooo.oooHeadChunk == nil {
return nil, errors.New("invalid ooo head chunk")
}
}
// We create a temporary slice of chunk metas to hold the information of all
// possible chunks that may overlap with the requested chunk.
-tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.oooMmappedChunks))
-oooHeadRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.oooMmappedChunks))))
-if s.oooHeadChunk != nil && s.oooHeadChunk.OverlapsClosedInterval(mint, maxt) {
+tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks))
+oooHeadRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
+if s.ooo.oooHeadChunk != nil && s.ooo.oooHeadChunk.OverlapsClosedInterval(mint, maxt) {
// We only want to append the head chunk if this chunk existed when
// Series() was called. This brings consistency in case new data
// is added in between Series() and Chunk() calls.
@@ -386,7 +390,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
}
}
-for i, c := range s.oooMmappedChunks {
+for i, c := range s.ooo.oooMmappedChunks {
chunkRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
// We can skip chunks that came in later than the last known OOOLastRef.
if chunkRef > meta.OOOLastRef {
@@ -431,11 +435,11 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
// If head chunk min and max time match the meta OOO markers
// that means that the chunk has not expanded so we can append
// it as it is.
-if s.oooHeadChunk.minTime == meta.OOOLastMinTime && s.oooHeadChunk.maxTime == meta.OOOLastMaxTime {
-xor, err = s.oooHeadChunk.chunk.ToXOR() // TODO(jesus.vazquez) (This is an optimization idea that has no priority and might not be that useful) See if we could use a copy of the underlying slice. That would leave the more expensive ToXOR() function only for the usecase where Bytes() is called.
+if s.ooo.oooHeadChunk.minTime == meta.OOOLastMinTime && s.ooo.oooHeadChunk.maxTime == meta.OOOLastMaxTime {
+xor, err = s.ooo.oooHeadChunk.chunk.ToXOR() // TODO(jesus.vazquez) (This is an optimization idea that has no priority and might not be that useful) See if we could use a copy of the underlying slice. That would leave the more expensive ToXOR() function only for the usecase where Bytes() is called.
} else {
// We need to remove samples that are outside of the markers
-xor, err = s.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime)
+xor, err = s.ooo.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime)
}
if err != nil {
return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk")
@@ -503,11 +507,7 @@ func (o mergedOOOChunks) Appender() (chunkenc.Appender, error) {
}
func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
-iterators := make([]chunkenc.Iterator, 0, len(o.chunks))
-for _, c := range o.chunks {
-iterators = append(iterators, c.Chunk.Iterator(nil))
-}
-return storage.NewChainSampleIterator(iterators)
+return storage.ChainSampleIteratorFromMetas(iterator, o.chunks)
}
func (o mergedOOOChunks) NumSamples() int {


@@ -29,6 +29,7 @@ import (
"go.uber.org/atomic"
"github.com/prometheus/prometheus/model/exemplar"
+"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage"
@@ -42,6 +43,15 @@ import (
"github.com/prometheus/prometheus/tsdb/wlog"
)
// histogramRecord combines both RefHistogramSample and RefFloatHistogramSample
// to simplify the WAL replay.
type histogramRecord struct {
ref chunks.HeadSeriesRef
t int64
h *histogram.Histogram
fh *histogram.FloatHistogram
}
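Replay code can then branch on whichever pointer is non-nil. A toy sketch of that dispatch (placeholder types, not the vendored ones):

package main

import "fmt"

type intHist struct{ Sum float64 }
type floatHist struct{ Sum float64 }

type histRecord struct {
	t  int64
	h  *intHist
	fh *floatHist
}

// replay routes a unified record to the right append path.
func replay(rec histRecord) string {
	if rec.h != nil {
		return fmt.Sprintf("appendHistogram(t=%d)", rec.t)
	}
	return fmt.Sprintf("appendFloatHistogram(t=%d)", rec.t)
}

func main() {
	fmt.Println(replay(histRecord{t: 1, h: &intHist{}}))
	fmt.Println(replay(histRecord{t: 2, fh: &floatHist{}}))
}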
func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) {
// Track number of samples that referenced a series we don't know about
// for error reporting.
@@ -61,7 +71,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
dec record.Decoder
shards = make([][]record.RefSample, n)
-histogramShards = make([][]record.RefHistogramSample, n)
+histogramShards = make([][]histogramRecord, n)
decoded = make(chan interface{}, 10)
decodeErr, seriesCreationErr error
@@ -90,6 +100,11 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
return []record.RefHistogramSample{}
},
}
floatHistogramsPool = sync.Pool{
New: func() interface{} {
return []record.RefFloatHistogramSample{}
},
}
metadataPool = sync.Pool{
New: func() interface{} {
return []record.RefMetadata{}
@@ -212,6 +227,18 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
return
}
decoded <- hists
case record.FloatHistogramSamples:
hists := floatHistogramsPool.Get().([]record.RefFloatHistogramSample)[:0]
hists, err = dec.FloatHistogramSamples(rec, hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: errors.Wrap(err, "decode float histograms"),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- hists
case record.Metadata:
meta := metadataPool.Get().([]record.RefMetadata)[:0]
meta, err := dec.Metadata(rec, meta)
@@ -337,7 +364,7 @@ Outer:
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(n)
-histogramShards[mod] = append(histogramShards[mod], sam)
+histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H})
}
for i := 0; i < n; i++ {
if len(histogramShards[i]) > 0 {
@@ -349,6 +376,43 @@ Outer:
}
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
histogramsPool.Put(v)
case []record.RefFloatHistogramSample:
samples := v
minValidTime := h.minValidTime.Load()
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := 5000
if len(samples) < m {
m = len(samples)
}
for i := 0; i < n; i++ {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if sam.T < minValidTime {
continue // Before minValidTime: discard.
}
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(n)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH})
}
for i := 0; i < n; i++ {
if len(histogramShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
floatHistogramsPool.Put(v)
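Both histogram cases shard samples the same way: by series reference modulo the worker count, so every sample of a given series is replayed by one worker and per-series ordering is preserved. A minimal sketch (sample is a stand-in type):

package main

import "fmt"

type sample struct {
	ref uint64
	t   int64
}

// shard distributes samples across n workers by series reference; all
// samples of one series land on the same worker.
func shard(samples []sample, n int) [][]sample {
	shards := make([][]sample, n)
	for _, s := range samples {
		i := s.ref % uint64(n)
		shards[i] = append(shards[i], s)
	}
	return shards
}

func main() {
	s := shard([]sample{{ref: 1}, {ref: 5}, {ref: 2}}, 4)
	fmt.Println(len(s[1]), len(s[2])) // 2 1 (refs 1 and 5 share worker 1)
}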
case []record.RefMetadata:
for _, m := range v {
s := h.series.getByID(chunks.HeadSeriesRef(m.Ref))
@@ -435,7 +499,14 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks)))
h.metrics.chunks.Add(float64(len(mmc) + len(oooMmc) - len(mSeries.mmappedChunks)))
mSeries.mmappedChunks = mmc
-mSeries.oooMmappedChunks = oooMmc
+if len(oooMmc) == 0 {
+mSeries.ooo = nil
+} else {
+if mSeries.ooo == nil {
+mSeries.ooo = &memSeriesOOOFields{}
+}
+*mSeries.ooo = memSeriesOOOFields{oooMmappedChunks: oooMmc}
+}
// Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject.
if len(mmc) == 0 {
mSeries.mmMaxTime = math.MinInt64
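The ooo sub-struct is only allocated when a series actually has out-of-order data, so purely in-order series pay for a nil pointer instead of the whole struct. A tiny sketch of this lazy-allocation pattern (field names are stand-ins):

package main

import "fmt"

// oooFields holds out-of-order state that most series never need.
type oooFields struct {
	mmappedChunks []int
}

type series struct {
	ooo *oooFields
}

func (s *series) insertOOO(c int) {
	if s.ooo == nil {
		s.ooo = &oooFields{} // allocate on first out-of-order sample
	}
	s.ooo.mmappedChunks = append(s.ooo.mmappedChunks, c)
}

func main() {
	var s series
	fmt.Println(s.ooo == nil) // true: no OOO data yet
	s.insertOOO(1)
	fmt.Println(len(s.ooo.mmappedChunks)) // 1
}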
@@ -467,12 +538,12 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
type walSubsetProcessor struct {
input chan walSubsetProcessorInputItem
output chan []record.RefSample
-histogramsOutput chan []record.RefHistogramSample
+histogramsOutput chan []histogramRecord
}
type walSubsetProcessorInputItem struct {
samples []record.RefSample
-histogramSamples []record.RefHistogramSample
+histogramSamples []histogramRecord
existingSeries *memSeries
walSeriesRef chunks.HeadSeriesRef
}
@@ -480,7 +551,7 @@ type walSubsetProcessorInputItem struct {
func (wp *walSubsetProcessor) setup() {
wp.input = make(chan walSubsetProcessorInputItem, 300)
wp.output = make(chan []record.RefSample, 300)
-wp.histogramsOutput = make(chan []record.RefHistogramSample, 300)
+wp.histogramsOutput = make(chan []histogramRecord, 300)
}
func (wp *walSubsetProcessor) closeAndDrain() {
@@ -502,7 +573,7 @@ func (wp *walSubsetProcessor) reuseBuf() []record.RefSample {
}
// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
-func (wp *walSubsetProcessor) reuseHistogramBuf() []record.RefHistogramSample {
+func (wp *walSubsetProcessor) reuseHistogramBuf() []histogramRecord {
select {
case buf := <-wp.histogramsOutput:
return buf[:0]
@@ -541,7 +612,6 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
if s.T <= ms.mmMaxTime {
continue
}
-ms.isHistogramSeries = false
if s.T <= ms.mmMaxTime {
continue
}
@@ -562,27 +632,32 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
}
for _, s := range in.histogramSamples {
-if s.T < minValidTime {
+if s.t < minValidTime {
continue
}
-ms := h.series.getByID(s.Ref)
+ms := h.series.getByID(s.ref)
if ms == nil {
unknownHistogramRefs++
continue
}
-ms.isHistogramSeries = true
-if s.T <= ms.mmMaxTime {
+if s.t <= ms.mmMaxTime {
continue
}
-if _, chunkCreated := ms.appendHistogram(s.T, s.H, 0, h.chunkDiskMapper, chunkRange); chunkCreated {
+var chunkCreated bool
+if s.h != nil {
+_, chunkCreated = ms.appendHistogram(s.t, s.h, 0, h.chunkDiskMapper, chunkRange)
+} else {
+_, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, h.chunkDiskMapper, chunkRange)
+}
+if chunkCreated {
h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc()
}
-if s.T > maxt {
-maxt = s.T
+if s.t > maxt {
+maxt = s.t
}
-if s.T < mint {
-mint = s.T
+if s.t < mint {
+mint = s.t
}
}
@@ -748,7 +823,9 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
// chunk size parameters, we are not taking care of that here.
// TODO(codesome): see if there is a way to avoid duplicate m-map chunks if
// the size of ooo chunk was reduced between restart.
-ms.oooHeadChunk = nil
+if ms.ooo != nil {
+ms.ooo.oooHeadChunk = nil
+}
processors[idx].mx.Unlock()
}


@@ -423,7 +423,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ...
return errors.Errorf("out-of-order series added with label set %q", lset)
}
-if ref < w.lastRef && len(w.lastSeries) != 0 {
+if ref < w.lastRef && !w.lastSeries.IsEmpty() {
return errors.Errorf("series with reference greater than %d already added", ref)
}
// We add padding to 16 bytes to increase the addressable space we get through 4 byte
@@ -437,9 +437,9 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ...
}
w.buf2.Reset()
-w.buf2.PutUvarint(len(lset))
-for _, l := range lset {
+w.buf2.PutUvarint(lset.Len())
+if err := lset.Validate(func(l labels.Label) error {
var err error
cacheEntry, ok := w.symbolCache[l.Name]
nameIndex := cacheEntry.index
@@ -465,6 +465,9 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ...
}
}
w.buf2.PutUvarint32(valueIndex)
+return nil
+}); err != nil {
+return err
}
w.buf2.PutUvarint(len(chunks))
@@ -496,7 +499,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ...
return errors.Wrap(err, "write series data")
}
-w.lastSeries = append(w.lastSeries[:0], lset...)
+w.lastSeries.CopyFrom(lset)
w.lastRef = ref
return nil
@@ -1593,8 +1596,8 @@ func (r *Reader) LabelValueFor(id storage.SeriesRef, label string) (string, erro
return value, nil
}
-// Series reads the series with the given ID and writes its labels and chunks into lbls and chks.
-func (r *Reader) Series(id storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta) error {
+// Series reads the series with the given ID and writes its labels and chunks into builder and chks.
+func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
offset := id
// In version 2 series IDs are no longer exact references but series are 16-byte padded
// and the ID is the multiple of 16 of the actual position.
@@ -1605,7 +1608,7 @@ func (r *Reader) Series(id storage.SeriesRef, lbls *labels.Labels, chks *[]chunk
if d.Err() != nil {
return d.Err()
}
-return errors.Wrap(r.dec.Series(d.Get(), lbls, chks), "read series")
+return errors.Wrap(r.dec.Series(d.Get(), builder, chks), "read series")
}
func (r *Reader) Postings(name string, values ...string) (Postings, error) {
@@ -1640,6 +1643,7 @@ func (r *Reader) Postings(name string, values ...string) (Postings, error) {
return EmptyPostings(), nil
}
+slices.Sort(values) // Values must be in order so we can step through the table on disk.
res := make([]Postings, 0, len(values))
skip := 0
valueIndex := 0
@@ -1832,9 +1836,10 @@ func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) {
return "", d.Err()
}
-// Series decodes a series entry from the given byte slice into lset and chks.
-func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error {
-*lbls = (*lbls)[:0]
+// Series decodes a series entry from the given byte slice into builder and chks.
+// Previous contents of builder can be overwritten - make sure you copy before retaining.
+func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
+builder.Reset()
*chks = (*chks)[:0]
d := encoding.Decbuf{B: b}
@@ -1858,7 +1863,7 @@ func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) e
return errors.Wrap(err, "lookup label value")
}
-*lbls = append(*lbls, labels.Label{Name: ln, Value: lv})
+builder.Add(ln, lv)
}
// Read the chunks meta data.


@@ -353,9 +353,9 @@ func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error {
func (p *MemPostings) Add(id storage.SeriesRef, lset labels.Labels) {
p.mtx.Lock()
-for _, l := range lset {
+lset.Range(func(l labels.Label) {
p.addFor(id, l)
-}
+})
p.addFor(id, allPostingsKey)
p.mtx.Unlock()
@@ -428,6 +428,13 @@ func EmptyPostings() Postings {
return emptyPostings
}
// IsEmptyPostingsType returns true if the postings are an empty postings list.
// When this function returns false, it doesn't mean that the postings isn't empty
// (it could be an empty intersection of two non-empty postings, for example).
func IsEmptyPostingsType(p Postings) bool {
return p == emptyPostings
}
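In the real code the check is by identity against the canonical empty postings value; the payoff is that an intersection of matchers can bail out early. A rough sketch of that short-circuit with simplified types:

package main

import "fmt"

type postings []uint64

var emptyPostings = postings{}

func isEmptyPostings(p postings) bool { return len(p) == 0 }

// intersect returns early as soon as any input is the known-empty list,
// since intersecting with it can never produce results.
func intersect(lists ...postings) postings {
	for _, l := range lists {
		if isEmptyPostings(l) {
			return emptyPostings // short-circuit: nothing can match
		}
	}
	// ... a full merge-intersection would go here ...
	return lists[0]
}

func main() {
	fmt.Println(intersect(postings{1, 2}, emptyPostings)) // []
}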
// ErrPostings returns new postings that immediately error.
func ErrPostings(err error) Postings {
return errPostings{err}


@@ -36,6 +36,15 @@ func NewOOOChunk() *OOOChunk {
// Insert inserts the sample such that order is maintained.
// Returns false if insert was not possible due to the same timestamp already existing.
func (o *OOOChunk) Insert(t int64, v float64) bool {
// Although out-of-order samples can be out-of-order amongst themselves, we
// are opinionated and expect them to be usually in-order meaning we could
// try to append at the end first if the new timestamp is higher than the
// last known timestamp.
if len(o.samples) == 0 || t > o.samples[len(o.samples)-1].t {
o.samples = append(o.samples, sample{t, v, nil, nil})
return true
}
// Find index of sample we should replace.
i := sort.Search(len(o.samples), func(i int) bool { return o.samples[i].t >= t })
@@ -45,6 +54,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool {
return true
}
+// Duplicate sample for timestamp is not allowed.
if o.samples[i].t == t {
return false
}
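A self-contained sketch of the whole Insert strategy: a cheap append when the new sample is the newest, a binary search otherwise, and duplicate timestamps rejected (sample here is a local stand-in type):

package main

import (
	"fmt"
	"sort"
)

type sample struct {
	t int64
	v float64
}

// insert keeps samples sorted by timestamp, preferring the append fast path
// since out-of-order batches are still usually in order among themselves.
func insert(samples []sample, t int64, v float64) ([]sample, bool) {
	if len(samples) == 0 || t > samples[len(samples)-1].t {
		return append(samples, sample{t, v}), true // fast path: newest sample
	}
	i := sort.Search(len(samples), func(i int) bool { return samples[i].t >= t })
	if i < len(samples) && samples[i].t == t {
		return samples, false // duplicate timestamp is not allowed
	}
	samples = append(samples, sample{})
	copy(samples[i+1:], samples[i:]) // shift the tail right by one
	samples[i] = sample{t, v}
	return samples, true
}

func main() {
	s, _ := insert(nil, 10, 1)
	s, _ = insert(s, 5, 2) // out of order: inserted before t=10
	fmt.Println(s)         // [{5 2} {10 1}]
}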


@@ -47,21 +47,21 @@ func NewOOOHeadIndexReader(head *Head, mint, maxt int64) *OOOHeadIndexReader {
return &OOOHeadIndexReader{hr}
}
-func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta) error {
-return oh.series(ref, lbls, chks, 0)
+func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
+return oh.series(ref, builder, chks, 0)
}
// The passed lastMmapRef tells upto what max m-map chunk that we can consider.
// If it is 0, it means all chunks need to be considered.
// If it is non-0, then the oooHeadChunk must not be considered.
-func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta, lastMmapRef chunks.ChunkDiskMapperRef) error {
+func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastMmapRef chunks.ChunkDiskMapperRef) error {
s := oh.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
oh.head.metrics.seriesNotFound.Inc()
return storage.ErrNotFound
}
-*lbls = append((*lbls)[:0], s.lset...)
+builder.Assign(s.lset)
if chks == nil {
return nil
@@ -71,7 +71,11 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, lbls *labels.Labels,
defer s.Unlock()
*chks = (*chks)[:0]
-tmpChks := make([]chunks.Meta, 0, len(s.oooMmappedChunks))
+if s.ooo == nil {
+return nil
+}
+tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))
// We define these markers to track the last chunk reference while we
// fill the chunk meta.
@@ -103,15 +107,15 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, lbls *labels.Labels,
// Collect all chunks that overlap the query range, in order from most recent to most old,
// so we can set the correct markers.
-if s.oooHeadChunk != nil {
-c := s.oooHeadChunk
+if s.ooo.oooHeadChunk != nil {
+c := s.ooo.oooHeadChunk
if c.OverlapsClosedInterval(oh.mint, oh.maxt) && lastMmapRef == 0 {
-ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.oooMmappedChunks))))
+ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
addChunk(c.minTime, c.maxTime, ref)
}
}
-for i := len(s.oooMmappedChunks) - 1; i >= 0; i-- {
-c := s.oooMmappedChunks[i]
+for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
+c := s.ooo.oooMmappedChunks[i]
if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (lastMmapRef == 0 || lastMmapRef.GreaterThanOrEqualTo(c.ref)) {
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
addChunk(c.minTime, c.maxTime, ref)
@@ -232,6 +236,11 @@ func (cr OOOHeadChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
}
s.Lock()
if s.ooo == nil {
// There is no OOO data for this series.
s.Unlock()
return nil, storage.ErrNotFound
}
c, err := s.oooMergedChunk(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt)
s.Unlock()
if err != nil {
@@ -302,18 +311,23 @@ func NewOOOCompactionHead(head *Head) (*OOOCompactionHead, error) {
// TODO: consider having a lock specifically for ooo data.
ms.Lock()
if ms.ooo == nil {
ms.Unlock()
continue
}
mmapRef := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
-if mmapRef == 0 && len(ms.oooMmappedChunks) > 0 {
+if mmapRef == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
-mmapRef = ms.oooMmappedChunks[len(ms.oooMmappedChunks)-1].ref
+mmapRef = ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref
}
seq, off := mmapRef.Unpack()
if seq > lastSeq || (seq == lastSeq && off > lastOff) {
ch.lastMmapRef, lastSeq, lastOff = mmapRef, seq, off
}
-if len(ms.oooMmappedChunks) > 0 {
+if len(ms.ooo.oooMmappedChunks) > 0 {
ch.postings = append(ch.postings, seriesRef)
-for _, c := range ms.oooMmappedChunks {
+for _, c := range ms.ooo.oooMmappedChunks {
if c.minTime < ch.mint {
ch.mint = c.minTime
}
@@ -400,8 +414,8 @@ func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.P
return p
}
-func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error {
-return ir.ch.oooIR.series(ref, lset, chks, ir.ch.lastMmapRef)
+func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
+return ir.ch.oooIR.series(ref, builder, chks, ir.ch.lastMmapRef)
}
func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {


@@ -21,7 +21,6 @@ import (
"github.com/oklog/ulid"
"github.com/pkg/errors"
-"golang.org/x/exp/slices"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
@@ -240,7 +239,14 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
}
for _, m := range ms {
-if labelMustBeSet[m.Name] {
+if m.Name == "" && m.Value == "" { // Special-case for AllPostings, used in tests at least.
+k, v := index.AllPostingsKey()
+allPostings, err := ix.Postings(k, v)
+if err != nil {
+return nil, err
+}
+its = append(its, allPostings)
+} else if labelMustBeSet[m.Name] {
// If this matcher must be non-empty, we can be smarter.
matchesEmpty := m.Matches("")
isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp
@@ -269,6 +275,9 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
if err != nil {
return nil, err
}
+if index.IsEmptyPostingsType(it) {
+return index.EmptyPostings(), nil
+}
its = append(its, it)
} else { // l="a"
// Non-Not matcher, use normal postingsForMatcher.
@@ -276,6 +285,9 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
if err != nil {
return nil, err
}
+if index.IsEmptyPostingsType(it) {
+return index.EmptyPostings(), nil
+}
its = append(its, it)
}
} else { // l=""
@@ -322,7 +334,6 @@ func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, erro
if m.Type == labels.MatchRegexp {
setMatches := findSetMatches(m.GetRegexString())
if len(setMatches) > 0 {
-slices.Sort(setMatches)
return ix.Postings(m.Name, setMatches...)
}
}
@@ -333,14 +344,9 @@ func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, erro
}
var res []string
-lastVal, isSorted := "", true
for _, val := range vals {
if m.Matches(val) {
res = append(res, val)
-if isSorted && val < lastVal {
-isSorted = false
-}
-lastVal = val
}
}
@@ -348,9 +354,6 @@ func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, erro
return index.EmptyPostings(), nil
}
-if !isSorted {
-slices.Sort(res)
-}
return ix.Postings(m.Name, res...)
}
@@ -362,20 +365,17 @@ func inversePostingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Posting
}
var res []string
-lastVal, isSorted := "", true
-for _, val := range vals {
-if !m.Matches(val) {
-res = append(res, val)
-if isSorted && val < lastVal {
-isSorted = false
-}
-lastVal = val
-}
-}
-if !isSorted {
-slices.Sort(res)
+// If the inverse match is ="", we just want all the values.
+if m.Type == labels.MatchEqual && m.Value == "" {
+res = vals
+} else {
+for _, val := range vals {
+if !m.Matches(val) {
+res = append(res, val)
+}
+}
}
return ix.Postings(m.Name, res...)
}
@@ -426,6 +426,16 @@ func labelNamesWithMatchers(r IndexReader, matchers ...*labels.Matcher) ([]strin
return r.LabelNamesFor(postings...)
}
// seriesData, used inside other iterators, are updated when we move from one series to another.
type seriesData struct {
chks []chunks.Meta
intervals tombstones.Intervals
labels labels.Labels
}
// Labels implements part of storage.Series and storage.ChunkSeries.
func (s *seriesData) Labels() labels.Labels { return s.labels }
// blockBaseSeriesSet allows to iterate over all series in the single block.
// Iterated series are trimmed with given min and max time as well as tombstones.
// See newBlockSeriesSet and newBlockChunkSeriesSet to use it for either sample or chunk iterating.
@@ -438,17 +448,16 @@ type blockBaseSeriesSet struct {
mint, maxt int64
disableTrimming bool
-currIterFn func() *populateWithDelGenericSeriesIterator
-currLabels labels.Labels
+curr seriesData
bufChks []chunks.Meta
-bufLbls labels.Labels
+builder labels.ScratchBuilder
err error
}
func (b *blockBaseSeriesSet) Next() bool {
for b.p.Next() {
-if err := b.index.Series(b.p.At(), &b.bufLbls, &b.bufChks); err != nil {
+if err := b.index.Series(b.p.At(), &b.builder, &b.bufChks); err != nil {
// Postings may be stale. Skip if no underlying series exists.
if errors.Cause(err) == storage.ErrNotFound {
continue
@ -519,12 +528,9 @@ func (b *blockBaseSeriesSet) Next() bool {
intervals = intervals.Add(tombstones.Interval{Mint: b.maxt + 1, Maxt: math.MaxInt64}) intervals = intervals.Add(tombstones.Interval{Mint: b.maxt + 1, Maxt: math.MaxInt64})
} }
b.currLabels = make(labels.Labels, len(b.bufLbls)) b.curr.labels = b.builder.Labels()
copy(b.currLabels, b.bufLbls) b.curr.chks = chks
b.curr.intervals = intervals
b.currIterFn = func() *populateWithDelGenericSeriesIterator {
return newPopulateWithDelGenericSeriesIterator(b.blockID, b.chunks, chks, intervals)
}
return true return true
} }
return false return false
@ -556,29 +562,26 @@ type populateWithDelGenericSeriesIterator struct {
// the same, single series. // the same, single series.
chks []chunks.Meta chks []chunks.Meta
i int i int // Index into chks; -1 if not started yet.
err error err error
bufIter *DeletedIterator bufIter DeletedIterator // Retained for memory re-use. currDelIter may point here.
intervals tombstones.Intervals intervals tombstones.Intervals
currDelIter chunkenc.Iterator currDelIter chunkenc.Iterator
currChkMeta chunks.Meta currChkMeta chunks.Meta
} }
func newPopulateWithDelGenericSeriesIterator( func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
blockID ulid.ULID, p.blockID = blockID
chunks ChunkReader, p.chunks = cr
chks []chunks.Meta, p.chks = chks
intervals tombstones.Intervals, p.i = -1
) *populateWithDelGenericSeriesIterator { p.err = nil
return &populateWithDelGenericSeriesIterator{ p.bufIter.Iter = nil
blockID: blockID, p.bufIter.Intervals = p.bufIter.Intervals[:0]
chunks: chunks, p.intervals = intervals
chks: chks, p.currDelIter = nil
i: -1, p.currChkMeta = chunks.Meta{}
bufIter: &DeletedIterator{},
intervals: intervals,
}
} }
func (p *populateWithDelGenericSeriesIterator) next() bool { func (p *populateWithDelGenericSeriesIterator) next() bool {
@ -618,28 +621,55 @@ func (p *populateWithDelGenericSeriesIterator) next() bool {
// We don't want the full chunk, or it's potentially still opened, take // We don't want the full chunk, or it's potentially still opened, take
// just a part of it. // just a part of it.
p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(nil) p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(p.bufIter.Iter)
p.currDelIter = p.bufIter p.currDelIter = &p.bufIter
return true return true
} }
func (p *populateWithDelGenericSeriesIterator) Err() error { return p.err } func (p *populateWithDelGenericSeriesIterator) Err() error { return p.err }
func (p *populateWithDelGenericSeriesIterator) toSeriesIterator() chunkenc.Iterator { type blockSeriesEntry struct {
return &populateWithDelSeriesIterator{populateWithDelGenericSeriesIterator: p} chunks ChunkReader
blockID ulid.ULID
seriesData
} }
func (p *populateWithDelGenericSeriesIterator) toChunkSeriesIterator() chunks.Iterator { func (s *blockSeriesEntry) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
return &populateWithDelChunkSeriesIterator{populateWithDelGenericSeriesIterator: p} pi, ok := it.(*populateWithDelSeriesIterator)
if !ok {
pi = &populateWithDelSeriesIterator{}
}
pi.reset(s.blockID, s.chunks, s.chks, s.intervals)
return pi
}
type chunkSeriesEntry struct {
chunks ChunkReader
blockID ulid.ULID
seriesData
}
func (s *chunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator {
pi, ok := it.(*populateWithDelChunkSeriesIterator)
if !ok {
pi = &populateWithDelChunkSeriesIterator{}
}
pi.reset(s.blockID, s.chunks, s.chks, s.intervals)
return pi
} }
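
blockSeriesEntry and chunkSeriesEntry implement the iterator-recycling contract introduced in this release: the caller passes its previous iterator back in, and Iterator reuses it through a type assertion instead of allocating a fresh one. A caller-side sketch (the helper is hypothetical; the storage and chunkenc APIs are as vendored here):

package example

import (
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// countSamples walks every series in ss, handing the same iterator back
// to Iterator so each call can recycle the previous allocation.
func countSamples(ss storage.SeriesSet) (int, error) {
	var it chunkenc.Iterator // nil on the first call, recycled afterwards
	n := 0
	for ss.Next() {
		it = ss.At().Iterator(it)
		for it.Next() != chunkenc.ValNone {
			n++
		}
		if err := it.Err(); err != nil {
			return n, err
		}
	}
	return n, ss.Err()
}
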
// populateWithDelSeriesIterator allows to iterate over samples for the single series. // populateWithDelSeriesIterator allows to iterate over samples for the single series.
type populateWithDelSeriesIterator struct { type populateWithDelSeriesIterator struct {
*populateWithDelGenericSeriesIterator populateWithDelGenericSeriesIterator
curr chunkenc.Iterator curr chunkenc.Iterator
} }
func (p *populateWithDelSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals)
p.curr = nil
}
func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType { func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType {
if p.curr != nil { if p.curr != nil {
if valueType := p.curr.Next(); valueType != chunkenc.ValNone { if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
@ -651,7 +681,7 @@ func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType {
if p.currDelIter != nil { if p.currDelIter != nil {
p.curr = p.currDelIter p.curr = p.currDelIter
} else { } else {
p.curr = p.currChkMeta.Chunk.Iterator(nil) p.curr = p.currChkMeta.Chunk.Iterator(p.curr)
} }
if valueType := p.curr.Next(); valueType != chunkenc.ValNone { if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
return valueType return valueType
@ -701,11 +731,16 @@ func (p *populateWithDelSeriesIterator) Err() error {
} }
type populateWithDelChunkSeriesIterator struct { type populateWithDelChunkSeriesIterator struct {
*populateWithDelGenericSeriesIterator populateWithDelGenericSeriesIterator
curr chunks.Meta curr chunks.Meta
} }
func (p *populateWithDelChunkSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals)
p.curr = chunks.Meta{}
}
func (p *populateWithDelChunkSeriesIterator) Next() bool { func (p *populateWithDelChunkSeriesIterator) Next() bool {
if !p.next() { if !p.next() {
return false return false
@ -714,7 +749,6 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
if p.currDelIter == nil { if p.currDelIter == nil {
return true return true
} }
valueType := p.currDelIter.Next() valueType := p.currDelIter.Next()
if valueType == chunkenc.ValNone { if valueType == chunkenc.ValNone {
if err := p.currDelIter.Err(); err != nil { if err := p.currDelIter.Err(); err != nil {
@ -789,9 +823,47 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
t, v = p.currDelIter.At() t, v = p.currDelIter.At()
app.Append(t, v) app.Append(t, v)
} }
case chunkenc.ValFloatHistogram:
newChunk = chunkenc.NewFloatHistogramChunk()
if app, err = newChunk.Appender(); err != nil {
break
}
if hc, ok := p.currChkMeta.Chunk.(*chunkenc.FloatHistogramChunk); ok {
newChunk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader())
}
var h *histogram.FloatHistogram
t, h = p.currDelIter.AtFloatHistogram()
p.curr.MinTime = t
app.AppendFloatHistogram(t, h)
for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
if vt != chunkenc.ValFloatHistogram {
err = fmt.Errorf("found value type %v in float histogram chunk", vt)
break
}
t, h = p.currDelIter.AtFloatHistogram()
// Defend against corrupted chunks.
pI, nI, okToAppend, counterReset := app.(*chunkenc.FloatHistogramAppender).Appendable(h)
if len(pI)+len(nI) > 0 {
err = fmt.Errorf(
"bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required",
len(pI), len(nI),
)
break
}
if counterReset {
err = errors.New("detected unexpected counter reset in histogram")
break
}
if !okToAppend {
err = errors.New("unable to append histogram due to unexpected schema change")
break
}
app.AppendFloatHistogram(t, h)
}
default: default:
// TODO(beorn7): Need FloatHistogram eventually.
err = fmt.Errorf("populateWithDelChunkSeriesIterator: value type %v unsupported", valueType) err = fmt.Errorf("populateWithDelChunkSeriesIterator: value type %v unsupported", valueType)
} }
@ -828,19 +900,16 @@ func newBlockSeriesSet(i IndexReader, c ChunkReader, t tombstones.Reader, p inde
mint: mint, mint: mint,
maxt: maxt, maxt: maxt,
disableTrimming: disableTrimming, disableTrimming: disableTrimming,
bufLbls: make(labels.Labels, 0, 10),
}, },
} }
} }
func (b *blockSeriesSet) At() storage.Series { func (b *blockSeriesSet) At() storage.Series {
// At can be looped over before iterating, so save the current value locally. // At can be looped over before iterating, so save the current values locally.
currIterFn := b.currIterFn return &blockSeriesEntry{
return &storage.SeriesEntry{ chunks: b.chunks,
Lset: b.currLabels, blockID: b.blockID,
SampleIteratorFn: func() chunkenc.Iterator { seriesData: b.curr,
return currIterFn().toSeriesIterator()
},
} }
} }
@ -862,19 +931,16 @@ func newBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombst
mint: mint, mint: mint,
maxt: maxt, maxt: maxt,
disableTrimming: disableTrimming, disableTrimming: disableTrimming,
bufLbls: make(labels.Labels, 0, 10),
}, },
} }
} }
func (b *blockChunkSeriesSet) At() storage.ChunkSeries { func (b *blockChunkSeriesSet) At() storage.ChunkSeries {
// At can be looped over before iterating, so save the current value locally. // At can be looped over before iterating, so save the current values locally.
currIterFn := b.currIterFn return &chunkSeriesEntry{
return &storage.ChunkSeriesEntry{ chunks: b.chunks,
Lset: b.currLabels, blockID: b.blockID,
ChunkIteratorFn: func() chunks.Iterator { seriesData: b.curr,
return currIterFn().toChunkSeriesIterator()
},
} }
} }

View file

@ -17,7 +17,6 @@ package record
import ( import (
"math" "math"
"sort"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -50,6 +49,8 @@ const (
Metadata Type = 6 Metadata Type = 6
// HistogramSamples is used to match WAL records of type Histograms. // HistogramSamples is used to match WAL records of type Histograms.
HistogramSamples Type = 7 HistogramSamples Type = 7
// FloatHistogramSamples is used to match WAL records of type Float Histograms.
FloatHistogramSamples Type = 8
) )
func (rt Type) String() string { func (rt Type) String() string {
@ -64,6 +65,8 @@ func (rt Type) String() string {
return "exemplars" return "exemplars"
case HistogramSamples: case HistogramSamples:
return "histogram_samples" return "histogram_samples"
case FloatHistogramSamples:
return "float_histogram_samples"
case MmapMarkers: case MmapMarkers:
return "mmapmarkers" return "mmapmarkers"
case Metadata: case Metadata:
@ -174,6 +177,13 @@ type RefHistogramSample struct {
H *histogram.Histogram H *histogram.Histogram
} }
// RefFloatHistogramSample is a float histogram.
type RefFloatHistogramSample struct {
Ref chunks.HeadSeriesRef
T int64
FH *histogram.FloatHistogram
}
// RefMmapMarker marks that all the samples of the given series until now have been m-mapped to disk. // RefMmapMarker marks that all the samples of the given series until now have been m-mapped to disk.
type RefMmapMarker struct { type RefMmapMarker struct {
Ref chunks.HeadSeriesRef Ref chunks.HeadSeriesRef
@ -182,7 +192,9 @@ type RefMmapMarker struct {
// Decoder decodes series, sample, metadata and tombstone records. // Decoder decodes series, sample, metadata and tombstone records.
// The zero value is ready to use. // The zero value is ready to use.
type Decoder struct{} type Decoder struct {
builder labels.ScratchBuilder
}
// Type returns the type of the record. // Type returns the type of the record.
// Returns RecordUnknown if no valid record type is found. // Returns RecordUnknown if no valid record type is found.
@ -191,7 +203,7 @@ func (d *Decoder) Type(rec []byte) Type {
return Unknown return Unknown
} }
switch t := Type(rec[0]); t { switch t := Type(rec[0]); t {
case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples: case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples:
return t return t
} }
return Unknown return Unknown
@ -267,14 +279,15 @@ func (d *Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, e
// DecodeLabels decodes one set of labels from buf. // DecodeLabels decodes one set of labels from buf.
func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels { func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels {
lset := make(labels.Labels, dec.Uvarint()) // TODO: reconsider if this function could be pushed down into labels.Labels to be more efficient.
d.builder.Reset()
for i := range lset { nLabels := dec.Uvarint()
lset[i].Name = dec.UvarintStr() for i := 0; i < nLabels; i++ {
lset[i].Value = dec.UvarintStr() lName := dec.UvarintStr()
lValue := dec.UvarintStr()
d.builder.Add(lName, lValue)
} }
sort.Sort(lset) return d.builder.Labels()
return lset
} }
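
Decoder now owns a labels.ScratchBuilder, so each record's label set is assembled without intermediate allocations; the old sort.Sort disappears because series records already store their label pairs in sorted order. A sketch of the builder pattern for arbitrary, possibly unsorted input, hence the explicit Sort (the helper is hypothetical):

package example

import "github.com/prometheus/prometheus/model/labels"

// buildLabels reuses a single ScratchBuilder: Reset, Add each pair,
// Sort (only needed when input order is not guaranteed), then
// materialize the immutable label set with Labels.
func buildLabels(b *labels.ScratchBuilder, pairs [][2]string) labels.Labels {
	b.Reset()
	for _, p := range pairs {
		b.Add(p[0], p[1])
	}
	b.Sort()
	return b.Labels()
}
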
// Samples appends samples in rec to the given slice. // Samples appends samples in rec to the given slice.
@ -425,15 +438,11 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample)
rh := RefHistogramSample{ rh := RefHistogramSample{
Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)), Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)),
T: baseTime + dtime, T: baseTime + dtime,
H: &histogram.Histogram{ H: &histogram.Histogram{},
Schema: 0,
ZeroThreshold: 0,
ZeroCount: 0,
Count: 0,
Sum: 0,
},
} }
rh.H.CounterResetHint = histogram.CounterResetHint(dec.Byte())
rh.H.Schema = int32(dec.Varint64()) rh.H.Schema = int32(dec.Varint64())
rh.H.ZeroThreshold = math.Float64frombits(dec.Be64()) rh.H.ZeroThreshold = math.Float64frombits(dec.Be64())
@ -487,6 +496,84 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample)
return histograms, nil return histograms, nil
} }
func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) {
dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte())
if t != FloatHistogramSamples {
return nil, errors.New("invalid record type")
}
if dec.Len() == 0 {
return histograms, nil
}
var (
baseRef = dec.Be64()
baseTime = dec.Be64int64()
)
for len(dec.B) > 0 && dec.Err() == nil {
dref := dec.Varint64()
dtime := dec.Varint64()
rh := RefFloatHistogramSample{
Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)),
T: baseTime + dtime,
FH: &histogram.FloatHistogram{},
}
rh.FH.CounterResetHint = histogram.CounterResetHint(dec.Byte())
rh.FH.Schema = int32(dec.Varint64())
rh.FH.ZeroThreshold = dec.Be64Float64()
rh.FH.ZeroCount = dec.Be64Float64()
rh.FH.Count = dec.Be64Float64()
rh.FH.Sum = dec.Be64Float64()
l := dec.Uvarint()
if l > 0 {
rh.FH.PositiveSpans = make([]histogram.Span, l)
}
for i := range rh.FH.PositiveSpans {
rh.FH.PositiveSpans[i].Offset = int32(dec.Varint64())
rh.FH.PositiveSpans[i].Length = dec.Uvarint32()
}
l = dec.Uvarint()
if l > 0 {
rh.FH.NegativeSpans = make([]histogram.Span, l)
}
for i := range rh.FH.NegativeSpans {
rh.FH.NegativeSpans[i].Offset = int32(dec.Varint64())
rh.FH.NegativeSpans[i].Length = dec.Uvarint32()
}
l = dec.Uvarint()
if l > 0 {
rh.FH.PositiveBuckets = make([]float64, l)
}
for i := range rh.FH.PositiveBuckets {
rh.FH.PositiveBuckets[i] = dec.Be64Float64()
}
l = dec.Uvarint()
if l > 0 {
rh.FH.NegativeBuckets = make([]float64, l)
}
for i := range rh.FH.NegativeBuckets {
rh.FH.NegativeBuckets[i] = dec.Be64Float64()
}
histograms = append(histograms, rh)
}
if dec.Err() != nil {
return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms))
}
if len(dec.B) > 0 {
return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return histograms, nil
}
// Encoder encodes series, sample, and tombstones records. // Encoder encodes series, sample, and tombstones records.
// The zero value is ready to use. // The zero value is ready to use.
type Encoder struct{} type Encoder struct{}
@ -525,12 +612,13 @@ func (e *Encoder) Metadata(metadata []RefMetadata, b []byte) []byte {
// EncodeLabels encodes the contents of labels into buf. // EncodeLabels encodes the contents of labels into buf.
func EncodeLabels(buf *encoding.Encbuf, lbls labels.Labels) { func EncodeLabels(buf *encoding.Encbuf, lbls labels.Labels) {
buf.PutUvarint(len(lbls)) // TODO: reconsider if this function could be pushed down into labels.Labels to be more efficient.
buf.PutUvarint(lbls.Len())
for _, l := range lbls { lbls.Range(func(l labels.Label) {
buf.PutUvarintStr(l.Name) buf.PutUvarintStr(l.Name)
buf.PutUvarintStr(l.Value) buf.PutUvarintStr(l.Value)
} })
} }
// Samples appends the encoded samples to b and returns the resulting slice. // Samples appends the encoded samples to b and returns the resulting slice.
@ -631,6 +719,8 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []
buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
buf.PutVarint64(h.T - first.T) buf.PutVarint64(h.T - first.T)
buf.PutByte(byte(h.H.CounterResetHint))
buf.PutVarint64(int64(h.H.Schema)) buf.PutVarint64(int64(h.H.Schema))
buf.PutBE64(math.Float64bits(h.H.ZeroThreshold)) buf.PutBE64(math.Float64bits(h.H.ZeroThreshold))
@ -663,3 +753,56 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []
return buf.Get() return buf.Get()
} }
func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(FloatHistogramSamples))
if len(histograms) == 0 {
return buf.Get()
}
// Store base timestamp and base reference number of first histogram.
// All histograms encode their timestamp and ref as delta to those.
first := histograms[0]
buf.PutBE64(uint64(first.Ref))
buf.PutBE64int64(first.T)
for _, h := range histograms {
buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
buf.PutVarint64(h.T - first.T)
buf.PutByte(byte(h.FH.CounterResetHint))
buf.PutVarint64(int64(h.FH.Schema))
buf.PutBEFloat64(h.FH.ZeroThreshold)
buf.PutBEFloat64(h.FH.ZeroCount)
buf.PutBEFloat64(h.FH.Count)
buf.PutBEFloat64(h.FH.Sum)
buf.PutUvarint(len(h.FH.PositiveSpans))
for _, s := range h.FH.PositiveSpans {
buf.PutVarint64(int64(s.Offset))
buf.PutUvarint32(s.Length)
}
buf.PutUvarint(len(h.FH.NegativeSpans))
for _, s := range h.FH.NegativeSpans {
buf.PutVarint64(int64(s.Offset))
buf.PutUvarint32(s.Length)
}
buf.PutUvarint(len(h.FH.PositiveBuckets))
for _, b := range h.FH.PositiveBuckets {
buf.PutBEFloat64(b)
}
buf.PutUvarint(len(h.FH.NegativeBuckets))
for _, b := range h.FH.NegativeBuckets {
buf.PutBEFloat64(b)
}
}
return buf.Get()
}
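
Together with the decoder above, this gives a symmetric wire format for float histogram WAL records: a type byte, the base ref and timestamp, then per-sample ref/time deltas, counter-reset hint, schema, zero bucket, spans, and buckets. A minimal round-trip sketch using only the vendored record, chunks, and histogram packages; the sample values are arbitrary:

package example

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/record"
)

func roundTrip() error {
	in := []record.RefFloatHistogramSample{{
		Ref: chunks.HeadSeriesRef(42),
		T:   1000,
		FH: &histogram.FloatHistogram{
			Schema:          0,
			ZeroThreshold:   0.001,
			Count:           5,
			Sum:             12.5,
			PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
			PositiveBuckets: []float64{5},
		},
	}}

	var enc record.Encoder // zero value is ready to use
	rec := enc.FloatHistogramSamples(in, nil)

	var dec record.Decoder // likewise
	out, err := dec.FloatHistogramSamples(rec, nil)
	if err != nil {
		return err
	}
	fmt.Println(out[0].T, out[0].FH.Sum) // 1000 12.5
	return nil
}
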

View file

@ -49,10 +49,11 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l
const commitAfter = 10000 const commitAfter = 10000
ctx := context.Background() ctx := context.Background()
app := w.Appender(ctx) app := w.Appender(ctx)
var it chunkenc.Iterator
for _, s := range series { for _, s := range series {
ref := storage.SeriesRef(0) ref := storage.SeriesRef(0)
it := s.Iterator() it = s.Iterator(it)
lset := s.Labels() lset := s.Labels()
typ := it.Next() typ := it.Next()
lastTyp := typ lastTyp := typ
@ -73,7 +74,10 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l
ref, err = app.Append(ref, lset, t, v) ref, err = app.Append(ref, lset, t, v)
case chunkenc.ValHistogram: case chunkenc.ValHistogram:
t, h := it.AtHistogram() t, h := it.AtHistogram()
ref, err = app.AppendHistogram(ref, lset, t, h) ref, err = app.AppendHistogram(ref, lset, t, h, nil)
case chunkenc.ValFloatHistogram:
t, fh := it.AtFloatHistogram()
ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
default: default:
return "", fmt.Errorf("unknown sample type %s", typ.String()) return "", fmt.Errorf("unknown sample type %s", typ.String())
} }
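
The CreateBlock changes track the widened appender contract: AppendHistogram now accepts both histogram kinds, and exactly one of h and fh is non-nil per call. A sketch of the call shape (the wrapper is hypothetical; storage.Appender is as vendored here):

package example

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendHistogramSample forwards either an integer or a float histogram;
// callers pass nil for the kind they do not have.
func appendHistogramSample(app storage.Appender, lset labels.Labels, t int64,
	h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	return app.AppendHistogram(0, lset, t, h, fh)
}
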

View file

@ -1018,7 +1018,7 @@ func (r *walReader) next() bool {
// If we reached the end of the reader, advance to the next one // If we reached the end of the reader, advance to the next one
// and close. // and close.
// Do not close on the last one as it will still be appended to. // Do not close on the last one as it will still be appended to.
if err == io.EOF { if errors.Is(err, io.EOF) {
if r.cur == len(r.files)-1 { if r.cur == len(r.files)-1 {
return false return false
} }

View file

@ -96,7 +96,7 @@ type LiveReader struct {
// not be used again. It is up to the user to decide when to stop trying should // not be used again. It is up to the user to decide when to stop trying should
// io.EOF be returned. // io.EOF be returned.
func (r *LiveReader) Err() error { func (r *LiveReader) Err() error {
if r.eofNonErr && r.err == io.EOF { if r.eofNonErr && errors.Is(r.err, io.EOF) {
return nil return nil
} }
return r.err return r.err

View file

@ -43,7 +43,7 @@ func NewReader(r io.Reader) *Reader {
// It must not be called again after it returned false. // It must not be called again after it returned false.
func (r *Reader) Next() bool { func (r *Reader) Next() bool {
err := r.next() err := r.next()
if errors.Cause(err) == io.EOF { if errors.Is(err, io.EOF) {
// The last WAL segment record shouldn't be torn (should be full or last). // The last WAL segment record shouldn't be torn (should be full or last).
// The last record would be torn after a crash just before // The last record would be torn after a crash just before
// the last record part could be persisted to disk. // the last record part could be persisted to disk.
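
The WAL readers switch from direct comparison (and pkg/errors.Cause) to the standard library's errors.Is, which also matches io.EOF when it arrives wrapped via %w. A self-contained sketch of the difference:

package example

import (
	"errors"
	"fmt"
	"io"
)

func demo() {
	wrapped := fmt.Errorf("read segment: %w", io.EOF)
	fmt.Println(wrapped == io.EOF)          // false: equality misses wrapped errors
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: Is walks the unwrap chain
}
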

View file

@ -50,6 +50,7 @@ type WriteTo interface {
Append([]record.RefSample) bool Append([]record.RefSample) bool
AppendExemplars([]record.RefExemplar) bool AppendExemplars([]record.RefExemplar) bool
AppendHistograms([]record.RefHistogramSample) bool AppendHistograms([]record.RefHistogramSample) bool
AppendFloatHistograms([]record.RefFloatHistogramSample) bool
StoreSeries([]record.RefSeries, int) StoreSeries([]record.RefSeries, int)
// Next two methods are intended for garbage-collection: first we call // Next two methods are intended for garbage-collection: first we call
@ -476,13 +477,15 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error {
// Also used with readCheckpoint - implements segmentReadFn. // Also used with readCheckpoint - implements segmentReadFn.
func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
var ( var (
dec record.Decoder dec record.Decoder
series []record.RefSeries series []record.RefSeries
samples []record.RefSample samples []record.RefSample
samplesToSend []record.RefSample samplesToSend []record.RefSample
exemplars []record.RefExemplar exemplars []record.RefExemplar
histograms []record.RefHistogramSample histograms []record.RefHistogramSample
histogramsToSend []record.RefHistogramSample histogramsToSend []record.RefHistogramSample
floatHistograms []record.RefFloatHistogramSample
floatHistogramsToSend []record.RefFloatHistogramSample
) )
for r.Next() && !isClosed(w.quit) { for r.Next() && !isClosed(w.quit) {
rec := r.Record() rec := r.Record()
@ -567,7 +570,33 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
w.writer.AppendHistograms(histogramsToSend) w.writer.AppendHistograms(histogramsToSend)
histogramsToSend = histogramsToSend[:0] histogramsToSend = histogramsToSend[:0]
} }
case record.FloatHistogramSamples:
// Skip if experimental "histograms over remote write" is not enabled.
if !w.sendHistograms {
break
}
if !tail {
break
}
floatHistograms, err := dec.FloatHistogramSamples(rec, floatHistograms[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
for _, fh := range floatHistograms {
if fh.T > w.startTimestamp {
if !w.sendSamples {
w.sendSamples = true
duration := time.Since(w.startTime)
level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration)
}
floatHistogramsToSend = append(floatHistogramsToSend, fh)
}
}
if len(floatHistogramsToSend) > 0 {
w.writer.AppendFloatHistograms(floatHistogramsToSend)
floatHistogramsToSend = floatHistogramsToSend[:0]
}
case record.Tombstones: case record.Tombstones:
default: default:
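
With FloatHistogramSamples wired into readSegment, every WriteTo implementation must now also provide the AppendFloatHistograms callback added to the interface above. A stub sketch (everything beyond the interface method is hypothetical):

package example

import "github.com/prometheus/prometheus/tsdb/record"

// discardWriter sketches only the new method; a real implementation also
// provides Append, AppendExemplars, AppendHistograms, StoreSeries and the
// garbage-collection hooks.
type discardWriter struct{}

// AppendFloatHistograms receives decoded float histogram batches from the
// WAL watcher; the boolean mirrors the other Append* methods.
func (discardWriter) AppendFloatHistograms(fh []record.RefFloatHistogramSample) bool {
	return true
}
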

vendor/modules.txt vendored
View file

@ -80,7 +80,7 @@ github.com/VividCortex/ewma
# github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 # github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
## explicit; go 1.15 ## explicit; go 1.15
github.com/alecthomas/units github.com/alecthomas/units
# github.com/aws/aws-sdk-go v1.44.190 # github.com/aws/aws-sdk-go v1.44.192
## explicit; go 1.11 ## explicit; go 1.11
github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/awserr
@ -149,10 +149,10 @@ github.com/aws/aws-sdk-go-v2/internal/timeconv
## explicit; go 1.15 ## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi
# github.com/aws/aws-sdk-go-v2/config v1.18.10 # github.com/aws/aws-sdk-go-v2/config v1.18.11
## explicit; go 1.15 ## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/config github.com/aws/aws-sdk-go-v2/config
# github.com/aws/aws-sdk-go-v2/credentials v1.13.10 # github.com/aws/aws-sdk-go-v2/credentials v1.13.11
## explicit; go 1.15 ## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials
github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds
@ -165,7 +165,7 @@ github.com/aws/aws-sdk-go-v2/credentials/stscreds
## explicit; go 1.15 ## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds
github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config
# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.49 # github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.50
## explicit; go 1.15 ## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/feature/s3/manager github.com/aws/aws-sdk-go-v2/feature/s3/manager
# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 # github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27
@ -396,7 +396,7 @@ github.com/prometheus/common/sigv4
github.com/prometheus/procfs github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util github.com/prometheus/procfs/internal/util
# github.com/prometheus/prometheus v0.41.0 # github.com/prometheus/prometheus v0.42.0
## explicit; go 1.18 ## explicit; go 1.18
github.com/prometheus/prometheus/config github.com/prometheus/prometheus/config
github.com/prometheus/prometheus/discovery github.com/prometheus/prometheus/discovery
@ -602,7 +602,7 @@ google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa # google.golang.org/genproto v0.0.0-20230131230820-1c016267d619
## explicit; go 1.19 ## explicit; go 1.19
google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/annotations