vendor: make vendor-update

Aliaksandr Valialkin 2023-05-18 12:22:09 -07:00
parent d9b3a92348
commit b6dda0fefe
GPG key ID: A72BEC6CD3D0DED1 (no known key found for this signature in database)
66 changed files with 1811 additions and 790 deletions

go.mod (10 changed lines)

@@ -24,7 +24,7 @@ require (
 	github.com/googleapis/gax-go/v2 v2.8.0
 	github.com/influxdata/influxdb v1.11.1
 	github.com/klauspost/compress v1.16.5
-	github.com/prometheus/prometheus v0.43.1
+	github.com/prometheus/prometheus v0.44.0
 	github.com/urfave/cli/v2 v2.25.3
 	github.com/valyala/fastjson v1.6.4
 	github.com/valyala/fastrand v1.1.0
@@ -35,7 +35,7 @@ require (
 	golang.org/x/net v0.10.0
 	golang.org/x/oauth2 v0.8.0
 	golang.org/x/sys v0.8.0
-	google.golang.org/api v0.122.0
+	google.golang.org/api v0.123.0
 	gopkg.in/yaml.v2 v2.4.0
 )
@@ -43,13 +43,13 @@ require github.com/bmatcuk/doublestar/v4 v4.6.0
 require (
 	cloud.google.com/go v0.110.2 // indirect
-	cloud.google.com/go/compute v1.19.2 // indirect
+	cloud.google.com/go/compute v1.19.3 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	cloud.google.com/go/iam v1.0.1 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/aws/aws-sdk-go v1.44.260 // indirect
+	github.com/aws/aws-sdk-go v1.44.265 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
 	github.com/aws/aws-sdk-go-v2/credentials v1.13.24 // indirect
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3 // indirect
@@ -110,7 +110,7 @@ require (
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/goleak v1.2.1 // indirect
 	golang.org/x/crypto v0.9.0 // indirect
-	golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 // indirect
+	golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
 	golang.org/x/sync v0.2.0 // indirect
 	golang.org/x/text v0.9.0 // indirect
 	golang.org/x/time v0.3.0 // indirect

go.sum (48 changed lines)

@@ -21,8 +21,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.19.2 h1:GbJtPo8OKVHbVep8jvM57KidbYHxeE68LOVqouNLrDY=
-cloud.google.com/go/compute v1.19.2/go.mod h1:5f5a+iC1IriXYauaQ0EyQmEAEq9CGRnV5xJSQSlTV08=
+cloud.google.com/go/compute v1.19.3 h1:DcTwsFgGev/wV5+q8o2fzgcHOaac+DKGC91ZlvpsQds=
+cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI=
 cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
@@ -52,7 +52,7 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
-github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc=
+github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
 github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
 github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
@@ -87,8 +87,8 @@ github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.260 h1:78IJkDpDPXvLXvIkNAKDP/i3z8Vj+3sTAtQYw/v/2o8=
-github.com/aws/aws-sdk-go v1.44.260/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.265 h1:rlBuD8OYjM5Vfcf7jDa264oVHqlPqY7y7o+JmrjNFUc=
+github.com/aws/aws-sdk-go v1.44.265/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY=
 github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
@@ -157,10 +157,10 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4=
+github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ=
 github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
-github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY=
+github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
@@ -172,7 +172,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.10.0 h1:oIfnZFdC0YhpNNEX+SuIqko4cqqVZeN9IGTrhZje83Y=
+github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8=
 github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
 github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
 github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
@@ -284,7 +284,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc=
 github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
-github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E=
+github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
@@ -301,14 +301,14 @@ github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5O
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
-github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI=
+github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
-github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs=
+github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
 github.com/influxdata/influxdb v1.11.1 h1:VEkQVMJ83gjpyS2FJuQaSbt4Mu+btGBoZbVq0XwTHGQ=
 github.com/influxdata/influxdb v1.11.1/go.mod h1:WSTwm8ZvJARODSZJfcxdghcjCQVstHwClgO6MrbnGt0=
-github.com/ionos-cloud/sdk-go/v6 v6.1.4 h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY=
+github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -340,7 +340,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
-github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc=
+github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -352,7 +352,7 @@ github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo=
+github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -369,7 +369,7 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
-github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ=
+github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -403,8 +403,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
 github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/prometheus/prometheus v0.43.1 h1:Z/Z0S0CoPUVtUnHGokFksWMssSw2Y1Ir9NnWS1pPWU0=
-github.com/prometheus/prometheus v0.43.1/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM=
+github.com/prometheus/prometheus v0.44.0 h1:sgn8Fdx+uE5tHQn0/622swlk2XnIj6udoZCnbVjHIgc=
+github.com/prometheus/prometheus v0.44.0/go.mod h1:aPsmIK3py5XammeTguyqTmuqzX/jeCdyOWWobLHNKQg=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
 github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -413,7 +413,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -499,8 +499,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o=
-golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
+golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -522,7 +522,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
+golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -691,7 +691,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
+golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -714,8 +714,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
 google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
 google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
 google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.122.0 h1:zDobeejm3E7pEG1mNHvdxvjs5XJoCMzyNH+CmwL94Es=
-google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms=
+google.golang.org/api v0.123.0 h1:yHVU//vA+qkOhm4reEC9LtzHVUCN/IqqNRl1iQ9xE20=
+google.golang.org/api v0.123.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

vendor/cloud.google.com/go/compute/internal/version.go

@@ -15,4 +15,4 @@
 package internal

 // Version is the current tagged release of the library.
-const Version = "1.19.2"
+const Version = "1.19.3"

vendor/github.com/aws/aws-sdk-go/aws/config.go

@@ -252,19 +252,8 @@ type Config struct {
 	// and specify a Retryer instead.
 	SleepDelay func(time.Duration)

-	// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
-	// Will default to false. This would only be used for empty directory names in s3 requests.
-	//
-	// Example:
-	//    sess := session.Must(session.NewSession(&aws.Config{
-	//        DisableRestProtocolURICleaning: aws.Bool(true),
-	//    }))
-	//
-	//    svc := s3.New(sess)
-	//    out, err := svc.GetObject(&s3.GetObjectInput {
-	//        Bucket: aws.String("bucketname"),
-	//        Key: aws.String("//foo//bar//moo"),
-	//    })
+	// Deprecated: This setting no longer has any effect.
+	// RESTful paths are no longer cleaned after request serialization.
 	DisableRestProtocolURICleaning *bool

 	// EnableEndpointDiscovery will allow for endpoint discovery on operations that
@@ -497,8 +486,8 @@ func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config {
 	return c
 }

-// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value
-// returning a Config pointer for chaining.
+// Deprecated: This setting no longer has any effect.
+// RESTful paths are no longer cleaned after request serialization.
 func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config {
 	c.DisableRestProtocolURICleaning = &t
 	return c
@@ -611,7 +600,7 @@ func mergeInConfig(dst *Config, other *Config) {
 	if other.DisableRestProtocolURICleaning != nil {
 		dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
 	}

 	if other.EnforceShouldRetryCheck != nil {
 		dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
 	}

vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go

@@ -3605,6 +3605,12 @@ var awsPartition = partition{
 			}: endpoint{
 				Hostname: "athena-fips.us-east-1.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-east-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "athena-fips.us-east-1.api.aws",
+			},
 			endpointKey{
 				Region: "us-east-2",
 			}: endpoint{},
@@ -3620,6 +3626,12 @@ var awsPartition = partition{
 			}: endpoint{
 				Hostname: "athena-fips.us-east-2.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-east-2",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "athena-fips.us-east-2.api.aws",
+			},
 			endpointKey{
 				Region: "us-west-1",
 			}: endpoint{},
@@ -3635,6 +3647,12 @@ var awsPartition = partition{
 			}: endpoint{
 				Hostname: "athena-fips.us-west-1.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-west-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "athena-fips.us-west-1.api.aws",
+			},
 			endpointKey{
 				Region: "us-west-2",
 			}: endpoint{},
@@ -3650,6 +3668,12 @@ var awsPartition = partition{
 			}: endpoint{
 				Hostname: "athena-fips.us-west-2.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-west-2",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "athena-fips.us-west-2.api.aws",
+			},
 		},
 	},
 	"auditmanager": service{
@@ -7762,6 +7786,12 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "ca-central-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "devops-guru-fips.ca-central-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
@@ -7777,6 +7807,15 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "eu-west-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "fips-ca-central-1",
+			}: endpoint{
+				Hostname: "devops-guru-fips.ca-central-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ca-central-1",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "fips-us-east-1",
 			}: endpoint{
@@ -7795,6 +7834,15 @@ var awsPartition = partition{
 				},
 				Deprecated: boxedTrue,
 			},
+			endpointKey{
+				Region: "fips-us-west-1",
+			}: endpoint{
+				Hostname: "devops-guru-fips.us-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "fips-us-west-2",
 			}: endpoint{
@@ -7828,6 +7876,12 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "us-west-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "devops-guru-fips.us-west-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-west-2",
 			}: endpoint{},
@@ -18577,6 +18631,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-south-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-south-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "ap-southeast-1",
 			}: endpoint{},
@@ -18586,18 +18643,27 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-central-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-north-1",
 			}: endpoint{},
 			endpointKey{
 				Region: "eu-south-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-south-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
@@ -20401,18 +20467,63 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "ca-central-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "profile-fips.ca-central-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "fips-ca-central-1",
+			}: endpoint{
+				Hostname: "profile-fips.ca-central-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ca-central-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-east-1",
+			}: endpoint{
+				Hostname: "profile-fips.us-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-west-2",
+			}: endpoint{
+				Hostname: "profile-fips.us-west-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-west-2",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "us-east-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "profile-fips.us-east-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-west-2",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-west-2",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "profile-fips.us-west-2.amazonaws.com",
+			},
 		},
 	},
 	"projects.iot1click": service{
@@ -30418,6 +30529,16 @@ var awscnPartition = partition{
 			}: endpoint{},
 		},
 	},
+	"airflow": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "cn-north-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "cn-northwest-1",
+			}: endpoint{},
+		},
+	},
 	"api.ecr": service{
 		Endpoints: serviceEndpoints{
 			endpointKey{
@@ -31062,6 +31183,16 @@ var awscnPartition = partition{
 			}: endpoint{},
 		},
 	},
+	"emr-serverless": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "cn-north-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "cn-northwest-1",
+			}: endpoint{},
+		},
+	},
 	"es": service{
 		Endpoints: serviceEndpoints{
 			endpointKey{
@@ -32930,6 +33061,12 @@ var awsusgovPartition = partition{
 			}: endpoint{
 				Hostname: "athena-fips.us-gov-east-1.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-gov-east-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "athena-fips.us-gov-east-1.api.aws",
+			},
 			endpointKey{
 				Region: "us-gov-west-1",
 			}: endpoint{},
@@ -32945,6 +33082,12 @@ var awsusgovPartition = partition{
 			}: endpoint{
 				Hostname: "athena-fips.us-gov-west-1.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-gov-west-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "athena-fips.us-gov-west-1.api.aws",
+			},
 		},
 	},
 	"autoscaling": service{
@@ -36590,9 +36733,35 @@ var awsusgovPartition = partition{
 			endpointKey{
 				Region: "us-gov-east-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "route53resolver.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-east-1-fips",
+			}: endpoint{
+				Hostname:   "route53resolver.us-gov-east-1.amazonaws.com",
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "us-gov-west-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "route53resolver.us-gov-west-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoint{
+				Hostname:   "route53resolver.us-gov-west-1.amazonaws.com",
+				Deprecated: boxedTrue,
+			},
 		},
 	},
 	"runtime.lex": service{
@@ -38285,6 +38454,16 @@ var awsisoPartition = partition{
 			}: endpoint{},
 		},
 	},
+	"cloudcontrolapi": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-iso-west-1",
+			}: endpoint{},
+		},
+	},
 	"cloudformation": service{
 		Endpoints: serviceEndpoints{
 			endpointKey{
@@ -38771,6 +38950,28 @@ var awsisoPartition = partition{
 			}: endpoint{},
 		},
 	},
+	"rbin": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-iso-east-1",
+			}: endpoint{
+				Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov",
+				CredentialScope: credentialScope{
+					Region: "us-iso-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region:  "us-iso-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov",
+			},
+		},
+	},
 	"rds": service{
 		Endpoints: serviceEndpoints{
 			endpointKey{
@@ -39432,6 +39633,28 @@ var awsisobPartition = partition{
 			}: endpoint{},
 		},
 	},
+	"rbin": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-isob-east-1",
+			}: endpoint{
+				Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov",
+				CredentialScope: credentialScope{
+					Region: "us-isob-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-isob-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region:  "us-isob-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov",
+			},
+		},
+	},
 	"rds": service{
 		Endpoints: serviceEndpoints{
 			endpointKey{
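Note on the pattern above: Variant is a bit-flag field, which is why the new entries can demand FIPS and dual-stack in one endpointKey via fipsVariant | dualStackVariant. A standalone sketch of the idea; the type and constant names mirror the vendored endpoints package, but the flag values and the lookup map are illustrative assumptions, not the SDK's real tables:

package main

import "fmt"

// endpointVariant is a bit set; flags can be OR-ed together.
type endpointVariant int

const (
	fipsVariant endpointVariant = 1 << iota
	dualStackVariant
)

type endpointKey struct {
	Region  string
	Variant endpointVariant
}

func main() {
	// Hypothetical excerpt of one service's endpoint table.
	athena := map[endpointKey]string{
		{Region: "us-east-1"}:                                          "athena.us-east-1.amazonaws.com",
		{Region: "us-east-1", Variant: fipsVariant}:                    "athena-fips.us-east-1.amazonaws.com",
		{Region: "us-east-1", Variant: fipsVariant | dualStackVariant}: "athena-fips.us-east-1.api.aws",
	}
	// Asking for FIPS + dual-stack resolves to the .api.aws host added in this commit.
	fmt.Println(athena[endpointKey{Region: "us-east-1", Variant: fipsVariant | dualStackVariant}])
}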

vendor/github.com/aws/aws-sdk-go/aws/version.go

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"

 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.260"
+const SDKVersion = "1.44.265"

vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go

@@ -9,7 +9,6 @@ import (
 	"math"
 	"net/http"
 	"net/url"
-	"path"
 	"reflect"
 	"strconv"
 	"strings"
@@ -134,9 +133,6 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo
 	}

 	r.HTTPRequest.URL.RawQuery = query.Encode()
-
-	if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
-		cleanPath(r.HTTPRequest.URL)
-	}
 }

 func buildBody(r *request.Request, v reflect.Value) {
@@ -244,19 +240,6 @@ func buildQueryString(query url.Values, v reflect.Value, name string, tag reflec
 	return nil
 }

-func cleanPath(u *url.URL) {
-	hasSlash := strings.HasSuffix(u.Path, "/")
-
-	// clean up path, removing duplicate `/`
-	u.Path = path.Clean(u.Path)
-	u.RawPath = path.Clean(u.RawPath)
-
-	if hasSlash && !strings.HasSuffix(u.Path, "/") {
-		u.Path += "/"
-		u.RawPath += "/"
-	}
-}
-
 // EscapePath escapes part of a URL path in Amazon style
 func EscapePath(path string, encodeSep bool) string {
 	var buf bytes.Buffer
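For context on why cleanPath could be dropped outright: S3 object keys may legitimately contain empty path segments, and path.Clean collapses them, corrupting the request URI. A minimal demonstration of the behavior the removed code used to apply:

package main

import (
	"fmt"
	"path"
)

func main() {
	// The old cleanPath ran path.Clean over the request path, so an S3 key
	// like "//foo//bar//moo" lost its empty segments before being sent.
	fmt.Println(path.Clean("//foo//bar//moo")) // prints "/foo/bar/moo"
}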

vendor/github.com/aws/aws-sdk-go/service/sts/api.go

@@ -1898,8 +1898,12 @@ type AssumeRoleWithSAMLInput struct {
 	// For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
 	// in the IAM User Guide.
 	//
+	// SAMLAssertion is a sensitive parameter and its value will be
+	// replaced with "sensitive" in string returned by AssumeRoleWithSAMLInput's
+	// String and GoString methods.
+	//
 	// SAMLAssertion is a required field
-	SAMLAssertion *string `min:"4" type:"string" required:"true"`
+	SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"`
 }

 // String returns the string representation.
@@ -2264,8 +2268,12 @@ type AssumeRoleWithWebIdentityInput struct {
 	// the user who is using your application with a web identity provider before
 	// the application makes an AssumeRoleWithWebIdentity call.
 	//
+	// WebIdentityToken is a sensitive parameter and its value will be
+	// replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's
+	// String and GoString methods.
+	//
 	// WebIdentityToken is a required field
-	WebIdentityToken *string `min:"4" type:"string" required:"true"`
+	WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"`
 }

 // String returns the string representation.
@@ -2571,8 +2579,12 @@ type Credentials struct {
 	// The secret access key that can be used to sign requests.
 	//
+	// SecretAccessKey is a sensitive parameter and its value will be
+	// replaced with "sensitive" in string returned by Credentials's
+	// String and GoString methods.
+	//
 	// SecretAccessKey is a required field
-	SecretAccessKey *string `type:"string" required:"true"`
+	SecretAccessKey *string `type:"string" required:"true" sensitive:"true"`

 	// The token that users must pass to the service API to use the temporary credentials.
 	//
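The new sensitive:"true" tags feed the SDK's generated String and GoString methods, which print "sensitive" in place of the real value. A rough standalone illustration of tag-driven redaction in that spirit; this reflection helper is an assumption for demonstration, not the SDK's actual implementation:

package main

import (
	"fmt"
	"reflect"
)

type Credentials struct {
	AccessKeyID     *string
	SecretAccessKey *string `sensitive:"true"`
}

// redact prints struct fields, masking any field tagged sensitive:"true".
func redact(v interface{}) string {
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	out := "{"
	for i := 0; i < rt.NumField(); i++ {
		val := rv.Field(i).Elem().Interface()
		if rt.Field(i).Tag.Get("sensitive") == "true" {
			val = "sensitive"
		}
		out += fmt.Sprintf("%s: %v ", rt.Field(i).Name, val)
	}
	return out + "}"
}

func main() {
	id, key := "AKIDEXAMPLE", "s3cr3t"
	c := Credentials{AccessKeyID: &id, SecretAccessKey: &key}
	fmt.Println(redact(c)) // {AccessKeyID: AKIDEXAMPLE SecretAccessKey: sensitive }
}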

vendor/github.com/prometheus/prometheus/config/config.go

@@ -173,16 +173,16 @@ var (
 	// DefaultQueueConfig is the default remote queue configuration.
 	DefaultQueueConfig = QueueConfig{
-		// With a maximum of 200 shards, assuming an average of 100ms remote write
-		// time and 500 samples per batch, we will be able to push 1M samples/s.
-		MaxShards:         200,
+		// With a maximum of 50 shards, assuming an average of 100ms remote write
+		// time and 2000 samples per batch, we will be able to push 1M samples/s.
+		MaxShards:         50,
 		MinShards:         1,
-		MaxSamplesPerSend: 500,
+		MaxSamplesPerSend: 2000,

-		// Each shard will have a max of 2500 samples pending in its channel, plus the pending
-		// samples that have been enqueued. Theoretically we should only ever have about 3000 samples
-		// per shard pending. At 200 shards that's 600k.
-		Capacity:          2500,
+		// Each shard will have a max of 10,000 samples pending in its channel, plus the pending
+		// samples that have been enqueued. Theoretically we should only ever have about 12,000 samples
+		// per shard pending. At 50 shards that's 600k.
+		Capacity:          10000,
 		BatchSendDeadline: model.Duration(5 * time.Second),

 		// Backoff times for retrying a batch of samples on recoverable errors.
@@ -194,7 +194,7 @@ var (
 	DefaultMetadataConfig = MetadataConfig{
 		Send:              true,
 		SendInterval:      model.Duration(1 * time.Minute),
-		MaxSamplesPerSend: 500,
+		MaxSamplesPerSend: 2000,
 	}

 	// DefaultRemoteReadConfig is the default remote read configuration.
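The rewritten comments keep the same 1M samples/s target while trading 4x fewer shards for 4x larger batches. A quick arithmetic check of both claims; the constant names mirror the QueueConfig fields above, and the 100ms round trip is the assumption stated in the comment:

package main

import "fmt"

func main() {
	const (
		maxShards         = 50
		maxSamplesPerSend = 2000
		writeSeconds      = 0.1 // assumed average remote-write duration
		capacity          = 10000
	)
	// Throughput: each shard ships one batch per write round trip.
	fmt.Printf("%.0f samples/s\n", maxShards*maxSamplesPerSend/writeSeconds) // 1000000 samples/s
	// Pending: channel capacity plus roughly one enqueued batch per shard.
	fmt.Println(maxShards * (capacity + maxSamplesPerSend)) // 600000, the "600k" in the comment
}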

vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go

@@ -253,7 +253,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
 		oldStr := oldTyp.String()
 		newStr := newTyp.String()
 		for i, s := range e.Errors {
-			e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
+			e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
 		}
 	}
 	return err

vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go

@@ -192,6 +192,30 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram {
 //
 // This method returns a pointer to the receiving histogram for convenience.
 func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
+	switch {
+	case other.CounterResetHint == h.CounterResetHint:
+		// Adding apples to apples, all good. No need to change anything.
+	case h.CounterResetHint == GaugeType:
+		// Adding something else to a gauge. That's probably OK. Outcome is a gauge.
+		// Nothing to do since the receiver is already marked as gauge.
+	case other.CounterResetHint == GaugeType:
+		// Similar to before, but this time the receiver is "something else" and we have to change it to gauge.
+		h.CounterResetHint = GaugeType
+	case h.CounterResetHint == UnknownCounterReset:
+		// With the receiver's CounterResetHint being "unknown", this could still be legitimate
+		// if the caller knows what they are doing. Outcome is then again "unknown".
+		// No need to do anything since the receiver's CounterResetHint is already "unknown".
+	case other.CounterResetHint == UnknownCounterReset:
+		// Similar to before, but now we have to set the receiver's CounterResetHint to "unknown".
+		h.CounterResetHint = UnknownCounterReset
+	default:
+		// All other cases shouldn't actually happen.
+		// They are a direct collision of CounterReset and NotCounterReset.
+		// Conservatively set the CounterResetHint to "unknown" and issue a warning.
+		h.CounterResetHint = UnknownCounterReset
+		// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
+	}
+
 	otherZeroCount := h.reconcileZeroBuckets(other)
 	h.ZeroCount += otherZeroCount
 	h.Count += other.Count
@@ -414,6 +438,10 @@ func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
 // of observations, but NOT the sum of observations) is smaller in the receiving
 // histogram compared to the previous histogram. Otherwise, it returns false.
 //
+// This method will shortcut to true if a CounterReset is detected, and shortcut
+// to false if NotCounterReset is detected. Otherwise it will do the work to detect
+// a reset.
+//
 // Special behavior in case the Schema or the ZeroThreshold are not the same in
 // both histograms:
 //
@@ -432,12 +460,23 @@ func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
 // - Upon a decrease of the Schema, the buckets of the previous histogram are
 //   merged so that they match the new, lower-resolution schema (again without
 //   mutating the provided previous histogram).
+//
+// Note that this kind of reset detection is quite expensive. Ideally, resets
+// are detected at ingest time and stored in the TSDB, so that the reset
+// information can be read directly from there rather than be detected each time
+// again.
 func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
+	if h.CounterResetHint == CounterReset {
+		return true
+	}
+	if h.CounterResetHint == NotCounterReset {
+		return false
+	}
+	// In all other cases of CounterResetHint (UnknownCounterReset and GaugeType),
+	// we go on as we would otherwise, for reasons explained below.
+	//
+	// If the CounterResetHint is UnknownCounterReset, we do not know yet if this histogram comes
+	// with a counter reset. Therefore, we have to do all the detailed work to find out if there
+	// is a counter reset or not.
+	// We do the same if the CounterResetHint is GaugeType, which should not happen, but PromQL still
+	// allows the user to apply functions to gauge histograms that are only meant for counter histograms.
+	// In this case, we treat the gauge histograms as counter histograms
+	// (and we plan to return a warning about it to the user).
 	if h.Count < previous.Count {
 		return true
 	}
@@ -785,10 +824,11 @@ mergeLoop: // Merge together all buckets from the original schema that fall into
 			origIdx += span.Offset
 		}
 		currIdx := i.targetIdx(origIdx)
-		if firstPass {
+		switch {
+		case firstPass:
 			i.currIdx = currIdx
 			firstPass = false
-		} else if currIdx != i.currIdx {
+		case currIdx != i.currIdx:
 			// Reached next bucket in targetSchema.
 			// Do not actually forward to the next bucket, but break out.
 			break mergeLoop
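The switch added to Add collapses to a small set of merge rules for CounterResetHint. A condensed standalone sketch of just those rules; the constant names mirror the histogram package, while the helper itself is illustrative:

package main

import "fmt"

type CounterResetHint int

const (
	UnknownCounterReset CounterResetHint = iota
	CounterReset
	NotCounterReset
	GaugeType
)

// mergeHint mirrors the outcome of Add's switch for the receiver's hint.
func mergeHint(receiver, other CounterResetHint) CounterResetHint {
	switch {
	case receiver == other:
		return receiver // apples to apples
	case receiver == GaugeType || other == GaugeType:
		return GaugeType // gauges absorb everything else
	default:
		return UnknownCounterReset // unknown involved, or a CounterReset/NotCounterReset collision
	}
}

func main() {
	fmt.Println(mergeHint(CounterReset, GaugeType))       // 3 (GaugeType)
	fmt.Println(mergeHint(CounterReset, NotCounterReset)) // 0 (UnknownCounterReset)
}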

vendor/github.com/prometheus/prometheus/model/labels/labels.go

@@ -169,11 +169,12 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
 	b = b[:0]
 	i, j := 0, 0
 	for i < len(ls) && j < len(names) {
-		if names[j] < ls[i].Name {
+		switch {
+		case names[j] < ls[i].Name:
 			j++
-		} else if ls[i].Name < names[j] {
+		case ls[i].Name < names[j]:
 			i++
-		} else {
+		default:
 			b = append(b, ls[i].Name...)
 			b = append(b, seps[0])
 			b = append(b, ls[i].Value...)
@@ -213,11 +214,12 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
 	b.WriteByte(labelSep)
 	i, j := 0, 0
 	for i < len(ls) && j < len(names) {
-		if names[j] < ls[i].Name {
+		switch {
+		case names[j] < ls[i].Name:
 			j++
-		} else if ls[i].Name < names[j] {
+		case ls[i].Name < names[j]:
 			i++
-		} else {
+		default:
 			if b.Len() > 1 {
 				b.WriteByte(seps[0])
 			}
@@ -546,8 +548,8 @@ func (b *Builder) Get(n string) string {
 // Range calls f on each label in the Builder.
 func (b *Builder) Range(f func(l Label)) {
 	// Stack-based arrays to avoid heap allocation in most cases.
-	var addStack [1024]Label
-	var delStack [1024]string
+	var addStack [128]Label
+	var delStack [128]string
 	// Take a copy of add and del, so they are unaffected by calls to Set() or Del().
 	origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...)
 	b.base.Range(func(l Label) {
@@ -569,24 +571,18 @@ func contains(s []Label, n string) bool {
 	return false
 }

-// Labels returns the labels from the builder, adding them to res if non-nil.
-// Argument res can be the same as b.base, if caller wants to overwrite that slice.
+// Labels returns the labels from the builder.
 // If no modifications were made, the original labels are returned.
-func (b *Builder) Labels(res Labels) Labels {
+func (b *Builder) Labels() Labels {
 	if len(b.del) == 0 && len(b.add) == 0 {
 		return b.base
 	}

-	if res == nil {
-		// In the general case, labels are removed, modified or moved
-		// rather than added.
-		res = make(Labels, 0, len(b.base))
-	} else {
-		res = res[:0]
+	expectedSize := len(b.base) + len(b.add) - len(b.del)
+	if expectedSize < 1 {
+		expectedSize = 1
 	}
-	// Justification that res can be the same slice as base: in this loop
-	// we move forward through base, and either skip an element or assign
-	// it to res at its current position or an earlier position.
+	res := make(Labels, 0, expectedSize)
 	for _, l := range b.base {
 		if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) {
 			continue
@@ -636,3 +632,9 @@ func (b *ScratchBuilder) Labels() Labels {
 	// Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
 	return append([]Label{}, b.add...)
 }
+
+// Write the newly-built Labels out to ls.
+// Callers must ensure that there are no other references to ls, or any strings fetched from it.
+func (b *ScratchBuilder) Overwrite(ls *Labels) {
+	*ls = append((*ls)[:0], b.add...)
+}
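The Labels(res Labels) to Labels() signature change above removes the caller-supplied reuse buffer; the relabel and textparse hunks below are the matching call-site updates. A short usage sketch against the new API (import path as vendored here; the label values are made up):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	base := labels.FromStrings("__name__", "http_requests_total", "job", "api")

	// Builder.Labels now allocates its own result, sized by
	// len(base) + len(add) - len(del).
	b := labels.NewBuilder(base)
	b.Set("instance", "localhost:9090")
	b.Del("job")
	fmt.Println(b.Labels())

	// The new ScratchBuilder.Overwrite writes the built labels over an
	// existing value; the caller must guarantee nothing else aliases ls.
	var sb labels.ScratchBuilder
	sb.Add("job", "api")
	sb.Sort()
	var ls labels.Labels
	sb.Overwrite(&ls)
	fmt.Println(ls)
}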

vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go

@@ -56,8 +56,14 @@ func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] }
 func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name }

 func decodeSize(data string, index int) (int, int) {
-	var size int
-	for shift := uint(0); ; shift += 7 {
+	// Fast-path for common case of a single byte, value 0..127.
+	b := data[index]
+	index++
+	if b < 0x80 {
+		return int(b), index
+	}
+	size := int(b & 0x7F)
+	for shift := uint(7); ; shift += 7 {
 		// Just panic if we go off the end of data, since all Labels strings are constructed internally and
 		// malformed data indicates a bug, or memory corruption.
 		b := data[index]
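The fast path works because label sizes are stored as unsigned varints: values below 128 fit in one byte with the high bit clear, so no loop is needed. A matching encoder sketch under that assumption (not the vendored code, which constructs these strings elsewhere):

package main

import "fmt"

// appendSize encodes n with 7 data bits per byte, high bit set on all but the last byte.
func appendSize(buf []byte, n int) []byte {
	for n >= 0x80 {
		buf = append(buf, byte(n)|0x80)
		n >>= 7
	}
	return append(buf, byte(n))
}

func main() {
	fmt.Println(appendSize(nil, 5))   // [5]     — single byte, hits the new fast path
	fmt.Println(appendSize(nil, 300)) // [172 2] — multi-byte, falls through to the loop
}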
@@ -158,7 +164,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels {
 		b.Del(MetricName)
 		b.Del(names...)
 	}
-	return b.Labels(EmptyLabels())
+	return b.Labels()
 }

 // Hash returns a hash value for the label set.
@ -602,8 +608,8 @@ func (b *Builder) Get(n string) string {
// Range calls f on each label in the Builder. // Range calls f on each label in the Builder.
func (b *Builder) Range(f func(l Label)) { func (b *Builder) Range(f func(l Label)) {
// Stack-based arrays to avoid heap allocation in most cases. // Stack-based arrays to avoid heap allocation in most cases.
var addStack [1024]Label var addStack [128]Label
var delStack [1024]string var delStack [128]string
// Take a copy of add and del, so they are unaffected by calls to Set() or Del(). // Take a copy of add and del, so they are unaffected by calls to Set() or Del().
origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...)
b.base.Range(func(l Label) { b.base.Range(func(l Label) {
@ -625,10 +631,9 @@ func contains(s []Label, n string) bool {
return false return false
} }
// Labels returns the labels from the builder, adding them to res if non-nil. // Labels returns the labels from the builder.
// Argument res can be the same as b.base, if caller wants to overwrite that slice.
// If no modifications were made, the original labels are returned. // If no modifications were made, the original labels are returned.
func (b *Builder) Labels(res Labels) Labels { func (b *Builder) Labels() Labels {
if len(b.del) == 0 && len(b.add) == 0 { if len(b.del) == 0 && len(b.add) == 0 {
return b.base return b.base
} }
@ -638,7 +643,7 @@ func (b *Builder) Labels(res Labels) Labels {
a, d := 0, 0 a, d := 0, 0
bufSize := len(b.base.data) + labelsSize(b.add) bufSize := len(b.base.data) + labelsSize(b.add)
buf := make([]byte, 0, bufSize) // TODO: see if we can re-use the buffer from res. buf := make([]byte, 0, bufSize)
for pos := 0; pos < len(b.base.data); { for pos := 0; pos < len(b.base.data); {
oldPos := pos oldPos := pos
var lName string var lName string
@ -813,7 +818,7 @@ func (b *ScratchBuilder) Labels() Labels {
} }
// Write the newly-built Labels out to ls, reusing an internal buffer. // Write the newly-built Labels out to ls, reusing an internal buffer.
// Callers must ensure that there are no other references to ls. // Callers must ensure that there are no other references to ls, or any strings fetched from it.
func (b *ScratchBuilder) Overwrite(ls *Labels) { func (b *ScratchBuilder) Overwrite(ls *Labels) {
size := labelsSize(b.add) size := labelsSize(b.add)
if size <= cap(b.overwriteBuffer) { if size <= cap(b.overwriteBuffer) {


@ -211,7 +211,7 @@ func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool)
if !ProcessBuilder(lb, cfgs...) { if !ProcessBuilder(lb, cfgs...) {
return labels.EmptyLabels(), false return labels.EmptyLabels(), false
} }
return lb.Labels(lbls), true return lb.Labels(), true
} }
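A hedged sketch of calling the updated Process; the Config literal below is illustrative and not taken from this diff:

	cfg := &relabel.Config{
		SourceLabels: model.LabelNames{"__address__"},
		Regex:        relabel.MustNewRegexp(`(.+):\d+`),
		TargetLabel:  "host",
		Replacement:  "$1",
		Action:       relabel.Replace,
	}
	lset, keep := relabel.Process(labels.FromStrings("__address__", "db-1:9100"), cfg)
	// keep == true; lset now also carries host="db-1"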
// ProcessBuilder is like Process, but the caller passes a labels.Builder // ProcessBuilder is like Process, but the caller passes a labels.Builder


@ -238,9 +238,10 @@ func (p *PromParser) Metric(l *labels.Labels) string {
return s return s
} }
// Exemplar writes the exemplar of the current sample into the passed // Exemplar implements the Parser interface. However, since the classic
// exemplar. It returns if an exemplar exists. // Prometheus text format does not support exemplars, this implementation simply
func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool { // returns false and does nothing else.
func (p *PromParser) Exemplar(*exemplar.Exemplar) bool {
return false return false
} }


@ -20,6 +20,11 @@ import (
func (m Sample) T() int64 { return m.Timestamp } func (m Sample) T() int64 { return m.Timestamp }
func (m Sample) V() float64 { return m.Value } func (m Sample) V() float64 { return m.Value }
func (h Histogram) IsFloatHistogram() bool {
_, ok := h.GetCount().(*Histogram_CountFloat)
return ok
}
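IsFloatHistogram distinguishes the two oneof variants of prompb.Histogram. A branching sketch (getter names assumed from the gogo-generated accessors; not part of this diff):

	for _, hp := range ts.Histograms { // ts: a received prompb.TimeSeries
		if hp.IsFloatHistogram() {
			_ = hp.GetCountFloat() // float histogram: counts are float64
		} else {
			_ = hp.GetCountInt() // integer histogram: counts are uint64
		}
	}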
func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) {
size := r.Size() size := r.Size()
data, ok := p.Get().(*[]byte) data, ok := p.Get().(*[]byte)


@ -134,21 +134,24 @@ func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
type Chunk_Encoding int32 type Chunk_Encoding int32
const ( const (
Chunk_UNKNOWN Chunk_Encoding = 0 Chunk_UNKNOWN Chunk_Encoding = 0
Chunk_XOR Chunk_Encoding = 1 Chunk_XOR Chunk_Encoding = 1
Chunk_HISTOGRAM Chunk_Encoding = 2 Chunk_HISTOGRAM Chunk_Encoding = 2
Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 3
) )
var Chunk_Encoding_name = map[int32]string{ var Chunk_Encoding_name = map[int32]string{
0: "UNKNOWN", 0: "UNKNOWN",
1: "XOR", 1: "XOR",
2: "HISTOGRAM", 2: "HISTOGRAM",
3: "FLOAT_HISTOGRAM",
} }
var Chunk_Encoding_value = map[string]int32{ var Chunk_Encoding_value = map[string]int32{
"UNKNOWN": 0, "UNKNOWN": 0,
"XOR": 1, "XOR": 1,
"HISTOGRAM": 2, "HISTOGRAM": 2,
"FLOAT_HISTOGRAM": 3,
} }
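The new value keeps this enum aligned with chunkenc.Encoding (see the matching .proto change below). A quick correspondence sketch, assuming chunkenc's current iota order (EncXOR=1, EncHistogram=2, EncFloatHistogram=3):

	c := prompb.Chunk{Type: prompb.Chunk_FLOAT_HISTOGRAM}
	fmt.Println(c.Type)                                             // "FLOAT_HISTOGRAM"
	fmt.Println(int32(c.Type) == int32(chunkenc.EncFloatHistogram)) // true, by construction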
func (x Chunk_Encoding) String() string { func (x Chunk_Encoding) String() string {
@ -1143,75 +1146,76 @@ func init() {
func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) }
var fileDescriptor_d938547f84707355 = []byte{ var fileDescriptor_d938547f84707355 = []byte{
// 1081 bytes of a gzipped FileDescriptorProto // 1092 bytes of a gzipped FileDescriptorProto
// (regenerated gzipped descriptor bytes elided: the old and new hex payloads differ throughout and are not human-readable)
} }
func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {


@ -169,9 +169,10 @@ message Chunk {
// We require this to match chunkenc.Encoding. // We require this to match chunkenc.Encoding.
enum Encoding { enum Encoding {
UNKNOWN = 0; UNKNOWN = 0;
XOR = 1; XOR = 1;
HISTOGRAM = 2; HISTOGRAM = 2;
FLOAT_HISTOGRAM = 3;
} }
Encoding type = 3; Encoding type = 3;
bytes data = 4; bytes data = 4;


@ -288,10 +288,11 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
// Cleanup and reload pool if the configuration has changed. // Cleanup and reload pool if the configuration has changed.
var failed bool var failed bool
for name, sp := range m.scrapePools { for name, sp := range m.scrapePools {
if cfg, ok := m.scrapeConfigs[name]; !ok { switch cfg, ok := m.scrapeConfigs[name]; {
case !ok:
sp.stop() sp.stop()
delete(m.scrapePools, name) delete(m.scrapePools, name)
} else if !reflect.DeepEqual(sp.config, cfg) { case !reflect.DeepEqual(sp.config, cfg):
err := sp.reload(cfg) err := sp.reload(cfg)
if err != nil { if err != nil {
level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name)


@ -500,9 +500,13 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
} }
targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures))) targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures)))
for _, t := range targets { for _, t := range targets {
if !t.Labels().IsEmpty() { // Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage.
nonEmpty := false
t.LabelsRange(func(l labels.Label) { nonEmpty = true })
switch {
case nonEmpty:
all = append(all, t) all = append(all, t)
} else if !t.DiscoveredLabels().IsEmpty() { case !t.discoveredLabels.IsEmpty():
sp.droppedTargets = append(sp.droppedTargets, t) sp.droppedTargets = append(sp.droppedTargets, t)
} }
} }
@ -637,7 +641,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
met := lset.Get(labels.MetricName) met := lset.Get(labels.MetricName)
if limits.labelLimit > 0 { if limits.labelLimit > 0 {
nbLabels := lset.Len() nbLabels := lset.Len()
if nbLabels > int(limits.labelLimit) { if nbLabels > limits.labelLimit {
return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit) return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit)
} }
} }
@ -649,14 +653,14 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
return lset.Validate(func(l labels.Label) error { return lset.Validate(func(l labels.Label) error {
if limits.labelNameLengthLimit > 0 { if limits.labelNameLengthLimit > 0 {
nameLength := len(l.Name) nameLength := len(l.Name)
if nameLength > int(limits.labelNameLengthLimit) { if nameLength > limits.labelNameLengthLimit {
return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit) return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit)
} }
} }
if limits.labelValueLengthLimit > 0 { if limits.labelValueLengthLimit > 0 {
valueLength := len(l.Value) valueLength := len(l.Value)
if valueLength > int(limits.labelValueLengthLimit) { if valueLength > limits.labelValueLengthLimit {
return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit) return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit)
} }
} }
@ -666,17 +670,16 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels { func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels {
lb := labels.NewBuilder(lset) lb := labels.NewBuilder(lset)
targetLabels := target.Labels()
if honor { if honor {
targetLabels.Range(func(l labels.Label) { target.LabelsRange(func(l labels.Label) {
if !lset.Has(l.Name) { if !lset.Has(l.Name) {
lb.Set(l.Name, l.Value) lb.Set(l.Name, l.Value)
} }
}) })
} else { } else {
var conflictingExposedLabels []labels.Label var conflictingExposedLabels []labels.Label
targetLabels.Range(func(l labels.Label) { target.LabelsRange(func(l labels.Label) {
existingValue := lset.Get(l.Name) existingValue := lset.Get(l.Name)
if existingValue != "" { if existingValue != "" {
conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue}) conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue})
@ -686,11 +689,11 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
}) })
if len(conflictingExposedLabels) > 0 { if len(conflictingExposedLabels) > 0 {
resolveConflictingExposedLabels(lb, lset, targetLabels, conflictingExposedLabels) resolveConflictingExposedLabels(lb, conflictingExposedLabels)
} }
} }
res := lb.Labels(labels.EmptyLabels()) res := lb.Labels()
if len(rc) > 0 { if len(rc) > 0 {
res, _ = relabel.Process(res, rc...) res, _ = relabel.Process(res, rc...)
@ -699,47 +702,32 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
return res return res
} }
func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLabels labels.Labels, conflictingExposedLabels []labels.Label) { func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) {
sort.SliceStable(conflictingExposedLabels, func(i, j int) bool { sort.SliceStable(conflictingExposedLabels, func(i, j int) bool {
return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name) return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name)
}) })
for i, l := range conflictingExposedLabels { for _, l := range conflictingExposedLabels {
newName := l.Name newName := l.Name
for { for {
newName = model.ExportedLabelPrefix + newName newName = model.ExportedLabelPrefix + newName
if !exposedLabels.Has(newName) && if lb.Get(newName) == "" {
!targetLabels.Has(newName) && lb.Set(newName, l.Value)
!labelSliceHas(conflictingExposedLabels[:i], newName) {
conflictingExposedLabels[i].Name = newName
break break
} }
} }
} }
for _, l := range conflictingExposedLabels {
lb.Set(l.Name, l.Value)
}
}
func labelSliceHas(lbls []labels.Label, name string) bool {
for _, l := range lbls {
if l.Name == name {
return true
}
}
return false
} }
func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels { func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels {
lb := labels.NewBuilder(lset) lb := labels.NewBuilder(lset)
target.Labels().Range(func(l labels.Label) { target.LabelsRange(func(l labels.Label) {
lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name)) lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name))
lb.Set(l.Name, l.Value) lb.Set(l.Name, l.Value)
}) })
return lb.Labels(labels.EmptyLabels()) return lb.Labels()
} }
// appender returns an appender for ingested samples from the target. // appender returns an appender for ingested samples from the target.
@ -959,9 +947,10 @@ func (c *scrapeCache) iterDone(flushCache bool) {
count := len(c.series) + len(c.droppedSeries) + len(c.metadata) count := len(c.series) + len(c.droppedSeries) + len(c.metadata)
c.metaMtx.Unlock() c.metaMtx.Unlock()
if flushCache { switch {
case flushCache:
c.successfulCount = count c.successfulCount = count
} else if count > c.successfulCount*2+1000 { case count > c.successfulCount*2+1000:
// If a target had varying labels in scrapes that ultimately failed, // If a target had varying labels in scrapes that ultimately failed,
// the caches would grow indefinitely. Force a flush when this happens. // the caches would grow indefinitely. Force a flush when this happens.
// We use the heuristic that this is a doubling of the cache size // We use the heuristic that this is a doubling of the cache size


@ -181,6 +181,15 @@ func (t *Target) Labels() labels.Labels {
return b.Labels() return b.Labels()
} }
// LabelsRange calls f on each public label of the target.
func (t *Target) LabelsRange(f func(l labels.Label)) {
t.labels.Range(func(l labels.Label) {
if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) {
f(l)
}
})
}
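A usage sketch (illustrative), mirroring what the scrape loop above now does to avoid building a full labels.Labels per target:

	nonEmpty := false
	t.LabelsRange(func(l labels.Label) { nonEmpty = true }) // reserved "__"-prefixed labels are skipped
	if nonEmpty {
		// target exposes at least one public label
	}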
// DiscoveredLabels returns a copy of the target's labels before any processing. // DiscoveredLabels returns a copy of the target's labels before any processing.
func (t *Target) DiscoveredLabels() labels.Labels { func (t *Target) DiscoveredLabels() labels.Labels {
t.mtx.Lock() t.mtx.Lock()
@ -371,7 +380,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
} }
} }
preRelabelLabels := lb.Labels(labels.EmptyLabels()) preRelabelLabels := lb.Labels()
keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...)
// Check if the target was dropped. // Check if the target was dropped.
@ -404,9 +413,9 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
// Addresses reaching this point are already wrapped in [] if necessary. // Addresses reaching this point are already wrapped in [] if necessary.
switch scheme { switch scheme {
case "http", "": case "http", "":
addr = addr + ":80" addr += ":80"
case "https": case "https":
addr = addr + ":443" addr += ":443"
default: default:
return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme) return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme)
} }
@ -467,7 +476,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
lb.Set(model.InstanceLabel, addr) lb.Set(model.InstanceLabel, addr)
} }
res = lb.Labels(labels.EmptyLabels()) res = lb.Labels()
err = res.Validate(func(l labels.Label) error { err = res.Validate(func(l labels.Label) error {
// Check label values are valid, drop the target if not. // Check label values are valid, drop the target if not.
if !model.LabelValue(l.Value).IsValid() { if !model.LabelValue(l.Value).IsValid() {


@ -19,6 +19,7 @@ import (
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
) )
// BufferedSeriesIterator wraps an iterator with a look-back buffer. // BufferedSeriesIterator wraps an iterator with a look-back buffer.
@ -43,7 +44,7 @@ func NewBuffer(delta int64) *BufferedSeriesIterator {
func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator { func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator {
// TODO(codesome): based on encoding, allocate different buffer. // TODO(codesome): based on encoding, allocate different buffer.
bit := &BufferedSeriesIterator{ bit := &BufferedSeriesIterator{
buf: newSampleRing(delta, 16), buf: newSampleRing(delta, 0, chunkenc.ValNone),
delta: delta, delta: delta,
} }
bit.Reset(it) bit.Reset(it)
@ -68,11 +69,8 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool {
// PeekBack returns the nth previous element of the iterator. If there is none buffered, // PeekBack returns the nth previous element of the iterator. If there is none buffered,
// ok is false. // ok is false.
func (b *BufferedSeriesIterator) PeekBack(n int) ( func (b *BufferedSeriesIterator) PeekBack(n int) (sample tsdbutil.Sample, ok bool) {
t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool, return b.buf.nthLast(n)
) {
s, ok := b.buf.nthLast(n)
return s.t, s.v, s.h, s.fh, ok
} }
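Call sites move from five return values to the tsdbutil.Sample interface; a hedged sketch:

	if s, ok := bit.PeekBack(1); ok { // bit: a *BufferedSeriesIterator
		switch s.Type() {
		case chunkenc.ValFloat:
			_, _ = s.T(), s.F()
		case chunkenc.ValHistogram:
			_ = s.H()
		case chunkenc.ValFloatHistogram:
			_ = s.FH()
		}
	}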
// Buffer returns an iterator over the buffered data. Invalidates previously // Buffer returns an iterator over the buffered data. Invalidates previously
@ -122,14 +120,14 @@ func (b *BufferedSeriesIterator) Next() chunkenc.ValueType {
case chunkenc.ValNone: case chunkenc.ValNone:
return chunkenc.ValNone return chunkenc.ValNone
case chunkenc.ValFloat: case chunkenc.ValFloat:
t, v := b.it.At() t, f := b.it.At()
b.buf.add(sample{t: t, v: v}) b.buf.addF(fSample{t: t, f: f})
case chunkenc.ValHistogram: case chunkenc.ValHistogram:
t, h := b.it.AtHistogram() t, h := b.it.AtHistogram()
b.buf.add(sample{t: t, h: h}) b.buf.addH(hSample{t: t, h: h})
case chunkenc.ValFloatHistogram: case chunkenc.ValFloatHistogram:
t, fh := b.it.AtFloatHistogram() t, fh := b.it.AtFloatHistogram()
b.buf.add(sample{t: t, fh: fh}) b.buf.addFH(fhSample{t: t, fh: fh})
default: default:
panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType)) panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
} }
@ -166,56 +164,133 @@ func (b *BufferedSeriesIterator) Err() error {
return b.it.Err() return b.it.Err()
} }
// TODO(beorn7): Consider having different sample types for different value types. type fSample struct {
type sample struct { t int64
t int64 f float64
v float64
h *histogram.Histogram
fh *histogram.FloatHistogram
} }
func (s sample) T() int64 { func (s fSample) T() int64 {
return s.t return s.t
} }
func (s sample) V() float64 { func (s fSample) F() float64 {
return s.v return s.f
} }
func (s sample) H() *histogram.Histogram { func (s fSample) H() *histogram.Histogram {
panic("H() called for fSample")
}
func (s fSample) FH() *histogram.FloatHistogram {
panic("FH() called for fSample")
}
func (s fSample) Type() chunkenc.ValueType {
return chunkenc.ValFloat
}
type hSample struct {
t int64
h *histogram.Histogram
}
func (s hSample) T() int64 {
return s.t
}
func (s hSample) F() float64 {
panic("F() called for hSample")
}
func (s hSample) H() *histogram.Histogram {
return s.h return s.h
} }
func (s sample) FH() *histogram.FloatHistogram { func (s hSample) FH() *histogram.FloatHistogram {
return s.h.ToFloat()
}
func (s hSample) Type() chunkenc.ValueType {
return chunkenc.ValHistogram
}
type fhSample struct {
t int64
fh *histogram.FloatHistogram
}
func (s fhSample) T() int64 {
return s.t
}
func (s fhSample) F() float64 {
panic("F() called for fhSample")
}
func (s fhSample) H() *histogram.Histogram {
panic("H() called for fhSample")
}
func (s fhSample) FH() *histogram.FloatHistogram {
return s.fh return s.fh
} }
func (s sample) Type() chunkenc.ValueType { func (s fhSample) Type() chunkenc.ValueType {
switch { return chunkenc.ValFloatHistogram
case s.h != nil:
return chunkenc.ValHistogram
case s.fh != nil:
return chunkenc.ValFloatHistogram
default:
return chunkenc.ValFloat
}
} }
type sampleRing struct { type sampleRing struct {
delta int64 delta int64
buf []sample // lookback buffer // Lookback buffers. We use iBuf for mixed samples, but one of the three
i int // position of most recent element in ring buffer // concrete ones for homogeneous samples. (Only one of the four bufs is
f int // position of first element in ring buffer // allowed to be populated!) This avoids the overhead of the interface
l int // number of elements in buffer // wrapper for the happy (and by far most common) case of homogeneous
// samples.
iBuf []tsdbutil.Sample
fBuf []fSample
hBuf []hSample
fhBuf []fhSample
bufInUse bufType
i int // Position of most recent element in ring buffer.
f int // Position of first element in ring buffer.
l int // Number of elements in buffer.
it sampleRingIterator it sampleRingIterator
} }
func newSampleRing(delta int64, sz int) *sampleRing { type bufType int
r := &sampleRing{delta: delta, buf: make([]sample, sz)}
r.reset()
const (
noBuf bufType = iota // Nothing yet stored in sampleRing.
iBuf
fBuf
hBuf
fhBuf
)
// newSampleRing creates a new sampleRing. If you do not know the preferred
// value type yet, use a size of 0 (in which case the provided typ doesn't
// matter). On the first add, a buffer of size 16 will be allocated with the
// preferred type being the type of the first added sample.
func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing {
r := &sampleRing{delta: delta}
r.reset()
if size <= 0 {
// Will initialize on first add.
return r
}
switch typ {
case chunkenc.ValFloat:
r.fBuf = make([]fSample, size)
case chunkenc.ValHistogram:
r.hBuf = make([]hSample, size)
case chunkenc.ValFloatHistogram:
r.fhBuf = make([]fhSample, size)
default:
r.iBuf = make([]tsdbutil.Sample, size)
}
return r return r
} }
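A construction sketch matching the doc comment above: size 0 defers allocation until the first add, which then fixes the preferred sample type.

	r := newSampleRing(5*60*1000, 0, chunkenc.ValNone) // 5m delta in ms; typ is ignored at size 0
	r.addF(fSample{t: 1000, f: 42})                    // first add allocates a []fSample of length 16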
@ -223,6 +298,7 @@ func (r *sampleRing) reset() {
r.l = 0 r.l = 0
r.i = -1 r.i = -1
r.f = 0 r.f = 0
r.bufInUse = noBuf
} }
// Returns the current iterator. Invalidates previously returned iterators. // Returns the current iterator. Invalidates previously returned iterators.
@ -236,7 +312,7 @@ type sampleRingIterator struct {
r *sampleRing r *sampleRing
i int i int
t int64 t int64
v float64 f float64
h *histogram.Histogram h *histogram.Histogram
fh *histogram.FloatHistogram fh *histogram.FloatHistogram
} }
@ -246,17 +322,36 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType {
if it.i >= it.r.l { if it.i >= it.r.l {
return chunkenc.ValNone return chunkenc.ValNone
} }
s := it.r.at(it.i) switch it.r.bufInUse {
it.t = s.t case fBuf:
switch { s := it.r.atF(it.i)
case s.h != nil: it.t = s.t
it.f = s.f
return chunkenc.ValFloat
case hBuf:
s := it.r.atH(it.i)
it.t = s.t
it.h = s.h it.h = s.h
return chunkenc.ValHistogram return chunkenc.ValHistogram
case s.fh != nil: case fhBuf:
s := it.r.atFH(it.i)
it.t = s.t
it.fh = s.fh it.fh = s.fh
return chunkenc.ValFloatHistogram return chunkenc.ValFloatHistogram
}
s := it.r.at(it.i)
it.t = s.T()
switch s.Type() {
case chunkenc.ValHistogram:
it.h = s.H()
it.fh = nil
return chunkenc.ValHistogram
case chunkenc.ValFloatHistogram:
it.fh = s.FH()
it.h = nil
return chunkenc.ValFloatHistogram
default: default:
it.v = s.v it.f = s.F()
return chunkenc.ValFloat return chunkenc.ValFloat
} }
} }
@ -270,7 +365,7 @@ func (it *sampleRingIterator) Err() error {
} }
func (it *sampleRingIterator) At() (int64, float64) { func (it *sampleRingIterator) At() (int64, float64) {
return it.t, it.v return it.t, it.f
} }
func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) {
@ -288,22 +383,204 @@ func (it *sampleRingIterator) AtT() int64 {
return it.t return it.t
} }
func (r *sampleRing) at(i int) sample { func (r *sampleRing) at(i int) tsdbutil.Sample {
j := (r.f + i) % len(r.buf) j := (r.f + i) % len(r.iBuf)
return r.buf[j] return r.iBuf[j]
} }
// add adds a sample to the ring buffer and frees all samples that fall func (r *sampleRing) atF(i int) fSample {
// out of the delta range. j := (r.f + i) % len(r.fBuf)
func (r *sampleRing) add(s sample) { return r.fBuf[j]
l := len(r.buf) }
// Grow the ring buffer if it fits no more elements.
if l == r.l {
buf := make([]sample, 2*l)
copy(buf[l+r.f:], r.buf[r.f:])
copy(buf, r.buf[:r.f])
r.buf = buf func (r *sampleRing) atH(i int) hSample {
j := (r.f + i) % len(r.hBuf)
return r.hBuf[j]
}
func (r *sampleRing) atFH(i int) fhSample {
j := (r.f + i) % len(r.fhBuf)
return r.fhBuf[j]
}
// add adds a sample to the ring buffer and frees all samples that fall out of
// the delta range. Note that this method works for any sample
// implementation. If you know you are dealing with one of the implementations
// from this package (fSample, hSample, fhSample), call one of the specialized
// methods addF, addH, or addFH for better performance.
func (r *sampleRing) add(s tsdbutil.Sample) {
if r.bufInUse == noBuf {
// First sample.
switch s := s.(type) {
case fSample:
r.bufInUse = fBuf
r.fBuf = addF(s, r.fBuf, r)
case hSample:
r.bufInUse = hBuf
r.hBuf = addH(s, r.hBuf, r)
case fhSample:
r.bufInUse = fhBuf
r.fhBuf = addFH(s, r.fhBuf, r)
}
return
}
if r.bufInUse != iBuf {
// Nothing added to the interface buf yet. Let's check if we can
// stay specialized.
switch s := s.(type) {
case fSample:
if r.bufInUse == fBuf {
r.fBuf = addF(s, r.fBuf, r)
return
}
case hSample:
if r.bufInUse == hBuf {
r.hBuf = addH(s, r.hBuf, r)
return
}
case fhSample:
if r.bufInUse == fhBuf {
r.fhBuf = addFH(s, r.fhBuf, r)
return
}
}
// The new sample isn't a fit for the already existing
// ones. Copy the latter into the interface buffer where needed.
switch r.bufInUse {
case fBuf:
for _, s := range r.fBuf {
r.iBuf = append(r.iBuf, s)
}
r.fBuf = nil
case hBuf:
for _, s := range r.hBuf {
r.iBuf = append(r.iBuf, s)
}
r.hBuf = nil
case fhBuf:
for _, s := range r.fhBuf {
r.iBuf = append(r.iBuf, s)
}
r.fhBuf = nil
}
r.bufInUse = iBuf
}
r.iBuf = addSample(s, r.iBuf, r)
}
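A behavior sketch for the mixed case: once a sample of another type arrives, the specialized buffer is copied wholesale into iBuf (the ring indices stay valid) and all later adds take the interface path.

	r := newSampleRing(1000, 2, chunkenc.ValFloat)  // starts specialized on fSample
	r.addF(fSample{t: 1, f: 1.5})                   // stays in fBuf
	r.add(hSample{t: 2, h: &histogram.Histogram{}}) // fBuf is folded into iBuf; bufInUse = iBuf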
// addF is a version of the add method specialized for fSample.
func (r *sampleRing) addF(s fSample) {
switch r.bufInUse {
case fBuf: // Add to existing fSamples.
r.fBuf = addF(s, r.fBuf, r)
case noBuf: // Add first sample.
r.fBuf = addF(s, r.fBuf, r)
r.bufInUse = fBuf
case iBuf: // Already have interface samples. Add to the interface buf.
r.iBuf = addSample(s, r.iBuf, r)
default:
// Already have specialized samples that are not fSamples.
// Need to call the checked add method for conversion.
r.add(s)
}
}
// addH is a version of the add method specialized for hSample.
func (r *sampleRing) addH(s hSample) {
switch r.bufInUse {
case hBuf: // Add to existing hSamples.
r.hBuf = addH(s, r.hBuf, r)
case noBuf: // Add first sample.
r.hBuf = addH(s, r.hBuf, r)
r.bufInUse = hBuf
case iBuf: // Already have interface samples. Add to the interface buf.
r.iBuf = addSample(s, r.iBuf, r)
default:
// Already have specialized samples that are not hSamples.
// Need to call the checked add method for conversion.
r.add(s)
}
}
// addFH is a version of the add method specialized for fhSample.
func (r *sampleRing) addFH(s fhSample) {
switch r.bufInUse {
case fhBuf: // Add to existing fhSamples.
r.fhBuf = addFH(s, r.fhBuf, r)
case noBuf: // Add first sample.
r.fhBuf = addFH(s, r.fhBuf, r)
r.bufInUse = fhBuf
case iBuf: // Already have interface samples. Add to the interface buf.
r.iBuf = addSample(s, r.iBuf, r)
default:
// Already have specialized samples that are not fhSamples.
// Need to call the checked add method for conversion.
r.add(s)
}
}
// genericAdd is a generic implementation of adding a tsdbutil.Sample
// implementation to a buffer of a sample ring. However, the Go compiler
// currently (go1.20) decides to not expand the code during compile time, but
// creates dynamic code to handle the different types. That has a significant
// overhead during runtime, noticeable in PromQL benchmarks. For example, the
// "RangeQuery/expr=rate(a_hundred[1d]),steps=.*" benchmarks show about 7%
// longer runtime, 9% higher allocation size, and 10% more allocations.
// Therefore, genericAdd has been manually implemented for all the types
// (addSample, addF, addH, addFH) below.
//
// func genericAdd[T tsdbutil.Sample](s T, buf []T, r *sampleRing) []T {
// l := len(buf)
// // Grow the ring buffer if it fits no more elements.
// if l == 0 {
// buf = make([]T, 16)
// l = 16
// }
// if l == r.l {
// newBuf := make([]T, 2*l)
// copy(newBuf[l+r.f:], buf[r.f:])
// copy(newBuf, buf[:r.f])
//
// buf = newBuf
// r.i = r.f
// r.f += l
// l = 2 * l
// } else {
// r.i++
// if r.i >= l {
// r.i -= l
// }
// }
//
// buf[r.i] = s
// r.l++
//
// // Free head of the buffer of samples that just fell out of the range.
// tmin := s.T() - r.delta
// for buf[r.f].T() < tmin {
// r.f++
// if r.f >= l {
// r.f -= l
// }
// r.l--
// }
// return buf
// }
// addSample is a handcoded specialization of genericAdd (see above).
func addSample(s tsdbutil.Sample, buf []tsdbutil.Sample, r *sampleRing) []tsdbutil.Sample {
l := len(buf)
// Grow the ring buffer if it fits no more elements.
if l == 0 {
buf = make([]tsdbutil.Sample, 16)
l = 16
}
if l == r.l {
newBuf := make([]tsdbutil.Sample, 2*l)
copy(newBuf[l+r.f:], buf[r.f:])
copy(newBuf, buf[:r.f])
buf = newBuf
r.i = r.f r.i = r.f
r.f += l r.f += l
l = 2 * l l = 2 * l
@ -314,18 +591,136 @@ func (r *sampleRing) add(s sample) {
} }
} }
r.buf[r.i] = s buf[r.i] = s
r.l++ r.l++
// Free head of the buffer of samples that just fell out of the range. // Free head of the buffer of samples that just fell out of the range.
tmin := s.t - r.delta tmin := s.T() - r.delta
for r.buf[r.f].t < tmin { for buf[r.f].T() < tmin {
r.f++ r.f++
if r.f >= l { if r.f >= l {
r.f -= l r.f -= l
} }
r.l-- r.l--
} }
return buf
}
// addF is a handcoded specialization of genericAdd (see above).
func addF(s fSample, buf []fSample, r *sampleRing) []fSample {
l := len(buf)
// Grow the ring buffer if it fits no more elements.
if l == 0 {
buf = make([]fSample, 16)
l = 16
}
if l == r.l {
newBuf := make([]fSample, 2*l)
copy(newBuf[l+r.f:], buf[r.f:])
copy(newBuf, buf[:r.f])
buf = newBuf
r.i = r.f
r.f += l
l = 2 * l
} else {
r.i++
if r.i >= l {
r.i -= l
}
}
buf[r.i] = s
r.l++
// Free head of the buffer of samples that just fell out of the range.
tmin := s.T() - r.delta
for buf[r.f].T() < tmin {
r.f++
if r.f >= l {
r.f -= l
}
r.l--
}
return buf
}
// addH is a handcoded specialization of genericAdd (see above).
func addH(s hSample, buf []hSample, r *sampleRing) []hSample {
l := len(buf)
// Grow the ring buffer if it fits no more elements.
if l == 0 {
buf = make([]hSample, 16)
l = 16
}
if l == r.l {
newBuf := make([]hSample, 2*l)
copy(newBuf[l+r.f:], buf[r.f:])
copy(newBuf, buf[:r.f])
buf = newBuf
r.i = r.f
r.f += l
l = 2 * l
} else {
r.i++
if r.i >= l {
r.i -= l
}
}
buf[r.i] = s
r.l++
// Free head of the buffer of samples that just fell out of the range.
tmin := s.T() - r.delta
for buf[r.f].T() < tmin {
r.f++
if r.f >= l {
r.f -= l
}
r.l--
}
return buf
}
// addFH is a handcoded specialization of genericAdd (see above).
func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample {
l := len(buf)
// Grow the ring buffer if it fits no more elements.
if l == 0 {
buf = make([]fhSample, 16)
l = 16
}
if l == r.l {
newBuf := make([]fhSample, 2*l)
copy(newBuf[l+r.f:], buf[r.f:])
copy(newBuf, buf[:r.f])
buf = newBuf
r.i = r.f
r.f += l
l = 2 * l
} else {
r.i++
if r.i >= l {
r.i -= l
}
}
buf[r.i] = s
r.l++
// Free head of the buffer of samples that just fell out of the range.
tmin := s.T() - r.delta
for buf[r.f].T() < tmin {
r.f++
if r.f >= l {
r.f -= l
}
r.l--
}
return buf
} }
// reduceDelta lowers the buffered time delta, dropping any samples that are // reduceDelta lowers the buffered time delta, dropping any samples that are
@ -340,39 +735,98 @@ func (r *sampleRing) reduceDelta(delta int64) bool {
return true return true
} }
switch r.bufInUse {
case fBuf:
genericReduceDelta(r.fBuf, r)
case hBuf:
genericReduceDelta(r.hBuf, r)
case fhBuf:
genericReduceDelta(r.fhBuf, r)
default:
genericReduceDelta(r.iBuf, r)
}
return true
}
func genericReduceDelta[T tsdbutil.Sample](buf []T, r *sampleRing) {
// Free head of the buffer of samples that just fell out of the range. // Free head of the buffer of samples that just fell out of the range.
l := len(r.buf) l := len(buf)
tmin := r.buf[r.i].t - delta tmin := buf[r.i].T() - r.delta
for r.buf[r.f].t < tmin { for buf[r.f].T() < tmin {
r.f++ r.f++
if r.f >= l { if r.f >= l {
r.f -= l r.f -= l
} }
r.l-- r.l--
} }
return true
} }
// nthLast returns the nth most recent element added to the ring. // nthLast returns the nth most recent element added to the ring.
func (r *sampleRing) nthLast(n int) (sample, bool) { func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) {
if n > r.l { if n > r.l {
return sample{}, false return fSample{}, false
}
i := r.l - n
switch r.bufInUse {
case fBuf:
return r.atF(i), true
case hBuf:
return r.atH(i), true
case fhBuf:
return r.atFH(i), true
default:
return r.at(i), true
} }
return r.at(r.l - n), true
} }
func (r *sampleRing) samples() []sample { func (r *sampleRing) samples() []tsdbutil.Sample {
res := make([]sample, r.l) res := make([]tsdbutil.Sample, r.l)
k := r.f + r.l k := r.f + r.l
var j int var j int
if k > len(r.buf) {
k = len(r.buf)
j = r.l - k + r.f
}
n := copy(res, r.buf[r.f:k]) switch r.bufInUse {
copy(res[n:], r.buf[:j]) case iBuf:
if k > len(r.iBuf) {
k = len(r.iBuf)
j = r.l - k + r.f
}
n := copy(res, r.iBuf[r.f:k])
copy(res[n:], r.iBuf[:j])
case fBuf:
if k > len(r.fBuf) {
k = len(r.fBuf)
j = r.l - k + r.f
}
resF := make([]fSample, r.l)
n := copy(resF, r.fBuf[r.f:k])
copy(resF[n:], r.fBuf[:j])
for i, s := range resF {
res[i] = s
}
case hBuf:
if k > len(r.hBuf) {
k = len(r.hBuf)
j = r.l - k + r.f
}
resH := make([]hSample, r.l)
n := copy(resH, r.hBuf[r.f:k])
copy(resH[n:], r.hBuf[:j])
for i, s := range resH {
res[i] = s
}
case fhBuf:
if k > len(r.fhBuf) {
k = len(r.fhBuf)
j = r.l - k + r.f
}
resFH := make([]fhSample, r.l)
n := copy(resFH, r.fhBuf[r.f:k])
copy(resFH[n:], r.fhBuf[:j])
for i, s := range resFH {
res[i] = s
}
}
return res return res
} }


@ -222,9 +222,10 @@ func (f *fanoutAppender) Rollback() (err error) {
for _, appender := range f.secondaries { for _, appender := range f.secondaries {
rollbackErr := appender.Rollback() rollbackErr := appender.Rollback()
if err == nil { switch {
case err == nil:
err = rollbackErr err = rollbackErr
} else if rollbackErr != nil { case rollbackErr != nil:
level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr) level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr)
} }
} }


@ -99,7 +99,7 @@ type MockQueryable struct {
MockQuerier Querier MockQuerier Querier
} }
func (q *MockQueryable) Querier(ctx context.Context, mint, maxt int64) (Querier, error) { func (q *MockQueryable) Querier(context.Context, int64, int64) (Querier, error) {
return q.MockQuerier, nil return q.MockQuerier, nil
} }
@ -118,11 +118,11 @@ type MockQuerier struct {
SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
} }
func (q *MockQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) { func (q *MockQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) {
return nil, nil, nil return nil, nil, nil
} }
func (q *MockQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) { func (q *MockQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) {
return nil, nil, nil return nil, nil, nil
} }


@ -197,13 +197,14 @@ func mergeStrings(a, b []string) []string {
res := make([]string, 0, maxl*10/9) res := make([]string, 0, maxl*10/9)
for len(a) > 0 && len(b) > 0 { for len(a) > 0 && len(b) > 0 {
if a[0] == b[0] { switch {
case a[0] == b[0]:
res = append(res, a[0]) res = append(res, a[0])
a, b = a[1:], b[1:] a, b = a[1:], b[1:]
} else if a[0] < b[0] { case a[0] < b[0]:
res = append(res, a[0]) res = append(res, a[0])
a = a[1:] a = a[1:]
} else { default:
res = append(res, b[0]) res = append(res, b[0])
b = b[1:] b = b[1:]
} }
@ -722,12 +723,11 @@ func (c *compactChunkIterator) Next() bool {
break break
} }
if next.MinTime == prev.MinTime && // Only do something if it is not a perfect duplicate.
next.MaxTime == prev.MaxTime && if next.MinTime != prev.MinTime ||
bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) { next.MaxTime != prev.MaxTime ||
// 1:1 duplicates, skip it. !bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) {
} else { // We operate on same series, so labels do not matter here.
// We operate on same series, so labels does not matter here.
overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next)) overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next))
if next.MaxTime > oMaxTime { if next.MaxTime > oMaxTime {
oMaxTime = next.MaxTime oMaxTime = next.MaxTime


@ -80,7 +80,7 @@ func init() {
// Client allows reading and writing from/to a remote HTTP endpoint. // Client allows reading and writing from/to a remote HTTP endpoint.
type Client struct { type Client struct {
remoteName string // Used to differentiate clients in metrics. remoteName string // Used to differentiate clients in metrics.
url *config_util.URL urlString string // url.String()
Client *http.Client Client *http.Client
timeout time.Duration timeout time.Duration
@ -122,7 +122,7 @@ func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {
return &Client{ return &Client{
remoteName: name, remoteName: name,
url: conf.URL, urlString: conf.URL.String(),
Client: httpClient, Client: httpClient,
timeout: time.Duration(conf.Timeout), timeout: time.Duration(conf.Timeout),
readQueries: remoteReadQueries.WithLabelValues(name, conf.URL.String()), readQueries: remoteReadQueries.WithLabelValues(name, conf.URL.String()),
@ -154,7 +154,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
return &Client{ return &Client{
remoteName: name, remoteName: name,
url: conf.URL, urlString: conf.URL.String(),
Client: httpClient, Client: httpClient,
retryOnRateLimit: conf.RetryOnRateLimit, retryOnRateLimit: conf.RetryOnRateLimit,
timeout: time.Duration(conf.Timeout), timeout: time.Duration(conf.Timeout),
@ -187,7 +187,7 @@ type RecoverableError struct {
// Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled // Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled
// and encoded bytes from codec.go. // and encoded bytes from codec.go.
func (c *Client) Store(ctx context.Context, req []byte) error { func (c *Client) Store(ctx context.Context, req []byte) error {
httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(req)) httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(req))
if err != nil { if err != nil {
// Errors from NewRequest are from unparsable URLs, so are not // Errors from NewRequest are from unparsable URLs, so are not
// recoverable. // recoverable.
@ -255,7 +255,7 @@ func (c Client) Name() string {
// Endpoint is the remote read or write endpoint. // Endpoint is the remote read or write endpoint.
func (c Client) Endpoint() string { func (c Client) Endpoint() string {
return c.url.String() return c.urlString
} }
// Read reads from a remote endpoint. // Read reads from a remote endpoint.
@ -276,7 +276,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
} }
compressed := snappy.Encode(nil, data) compressed := snappy.Encode(nil, data)
httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(compressed)) httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(compressed))
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to create request: %w", err) return nil, fmt.Errorf("unable to create request: %w", err)
} }
@ -310,7 +310,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
} }
if httpResp.StatusCode/100 != 2 { if httpResp.StatusCode/100 != 2 {
return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.url.String(), httpResp.Status, strings.TrimSpace(string(compressed))) return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed)))
} }
uncompressed, err := snappy.Decode(nil, compressed) uncompressed, err := snappy.Decode(nil, compressed)


@@ -17,6 +17,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"net/http"
 	"sort"
 	"strings"
@@ -120,10 +121,13 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
 	for ss.Next() {
 		series := ss.At()
 		iter = series.Iterator(iter)
-		samples := []prompb.Sample{}
 
-		for iter.Next() == chunkenc.ValFloat {
-			// TODO(beorn7): Add Histogram support.
+		var (
+			samples    []prompb.Sample
+			histograms []prompb.Histogram
+		)
+
+		for valType := iter.Next(); valType != chunkenc.ValNone; valType = iter.Next() {
 			numSamples++
 			if sampleLimit > 0 && numSamples > sampleLimit {
 				return nil, ss.Warnings(), HTTPError{
@@ -131,19 +135,32 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
 					status: http.StatusBadRequest,
 				}
 			}
-			ts, val := iter.At()
-			samples = append(samples, prompb.Sample{
-				Timestamp: ts,
-				Value:     val,
-			})
+
+			switch valType {
+			case chunkenc.ValFloat:
+				ts, val := iter.At()
+				samples = append(samples, prompb.Sample{
+					Timestamp: ts,
+					Value:     val,
+				})
+			case chunkenc.ValHistogram:
+				ts, h := iter.AtHistogram()
+				histograms = append(histograms, HistogramToHistogramProto(ts, h))
+			case chunkenc.ValFloatHistogram:
+				ts, fh := iter.AtFloatHistogram()
+				histograms = append(histograms, FloatHistogramToHistogramProto(ts, fh))
+			default:
+				return nil, ss.Warnings(), fmt.Errorf("unrecognized value type: %s", valType)
+			}
 		}
 		if err := iter.Err(); err != nil {
 			return nil, ss.Warnings(), err
 		}
 
 		resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
 			Labels:     labelsToLabelsProto(series.Labels(), nil),
 			Samples:    samples,
+			Histograms: histograms,
 		})
 	}
 	return resp, ss.Warnings(), ss.Err()
@@ -157,7 +174,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
 			return errSeriesSet{err: err}
 		}
 		lbls := labelProtosToLabels(ts.Labels)
-		series = append(series, &concreteSeries{labels: lbls, samples: ts.Samples})
+		series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms})
 	}
 
 	if sortSeries {
@@ -274,13 +291,14 @@ func MergeLabels(primary, secondary []prompb.Label) []prompb.Label {
 	result := make([]prompb.Label, 0, len(primary)+len(secondary))
 	i, j := 0, 0
 	for i < len(primary) && j < len(secondary) {
-		if primary[i].Name < secondary[j].Name {
+		switch {
+		case primary[i].Name < secondary[j].Name:
 			result = append(result, primary[i])
 			i++
-		} else if primary[i].Name > secondary[j].Name {
+		case primary[i].Name > secondary[j].Name:
 			result = append(result, secondary[j])
 			j++
-		} else {
+		default:
 			result = append(result, primary[i])
 			i++
 			j++
@@ -343,8 +361,9 @@ func (c *concreteSeriesSet) Warnings() storage.Warnings { return nil }
 
 // concreteSeries implements storage.Series.
 type concreteSeries struct {
 	labels     labels.Labels
-	samples    []prompb.Sample
+	floats     []prompb.Sample
+	histograms []prompb.Histogram
 }
 
 func (c *concreteSeries) Labels() labels.Labels {
@@ -356,84 +375,165 @@ func (c *concreteSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
 		csi.reset(c)
 		return csi
 	}
-	return newConcreteSeriersIterator(c)
+	return newConcreteSeriesIterator(c)
 }
 
 // concreteSeriesIterator implements storage.SeriesIterator.
 type concreteSeriesIterator struct {
-	cur    int
-	series *concreteSeries
+	floatsCur     int
+	histogramsCur int
+	curValType    chunkenc.ValueType
+	series        *concreteSeries
 }
 
-func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator {
+func newConcreteSeriesIterator(series *concreteSeries) chunkenc.Iterator {
 	return &concreteSeriesIterator{
-		cur:    -1,
-		series: series,
+		floatsCur:     -1,
+		histogramsCur: -1,
+		curValType:    chunkenc.ValNone,
+		series:        series,
 	}
 }
 
 func (c *concreteSeriesIterator) reset(series *concreteSeries) {
-	c.cur = -1
+	c.floatsCur = -1
+	c.histogramsCur = -1
+	c.curValType = chunkenc.ValNone
 	c.series = series
 }
 
 // Seek implements storage.SeriesIterator.
 func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
-	if c.cur == -1 {
-		c.cur = 0
+	if c.floatsCur == -1 {
+		c.floatsCur = 0
 	}
-	if c.cur >= len(c.series.samples) {
+	if c.histogramsCur == -1 {
+		c.histogramsCur = 0
+	}
+	if c.floatsCur >= len(c.series.floats) && c.histogramsCur >= len(c.series.histograms) {
 		return chunkenc.ValNone
 	}
 	// No-op check.
-	if s := c.series.samples[c.cur]; s.Timestamp >= t {
-		return chunkenc.ValFloat
+	if (c.curValType == chunkenc.ValFloat && c.series.floats[c.floatsCur].Timestamp >= t) ||
+		((c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram) && c.series.histograms[c.histogramsCur].Timestamp >= t) {
+		return c.curValType
 	}
-	// Do binary search between current position and end.
-	c.cur += sort.Search(len(c.series.samples)-c.cur, func(n int) bool {
-		return c.series.samples[n+c.cur].Timestamp >= t
+
+	c.curValType = chunkenc.ValNone
+
+	// Binary search between current position and end for both float and histograms samples.
+	c.floatsCur += sort.Search(len(c.series.floats)-c.floatsCur, func(n int) bool {
+		return c.series.floats[n+c.floatsCur].Timestamp >= t
 	})
-	if c.cur < len(c.series.samples) {
-		return chunkenc.ValFloat
+	c.histogramsCur += sort.Search(len(c.series.histograms)-c.histogramsCur, func(n int) bool {
+		return c.series.histograms[n+c.histogramsCur].Timestamp >= t
+	})
+	switch {
+	case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms):
+		// If float samples and histogram samples have overlapping timestamps prefer the float samples.
+		if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp {
+			c.curValType = chunkenc.ValFloat
+		} else {
+			c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
+		}
+		// When the timestamps do not overlap the cursor for the non-selected sample type has advanced too
+		// far; we decrement it back down here.
+		if c.series.floats[c.floatsCur].Timestamp != c.series.histograms[c.histogramsCur].Timestamp {
+			if c.curValType == chunkenc.ValFloat {
+				c.histogramsCur--
+			} else {
+				c.floatsCur--
+			}
+		}
+	case c.floatsCur < len(c.series.floats):
+		c.curValType = chunkenc.ValFloat
+	case c.histogramsCur < len(c.series.histograms):
+		c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
 	}
-	return chunkenc.ValNone
-	// TODO(beorn7): Add histogram support.
+	return c.curValType
+}
+
+func getHistogramValType(h *prompb.Histogram) chunkenc.ValueType {
+	if h.IsFloatHistogram() {
+		return chunkenc.ValFloatHistogram
+	}
+	return chunkenc.ValHistogram
 }
 
 // At implements chunkenc.Iterator.
 func (c *concreteSeriesIterator) At() (t int64, v float64) {
-	s := c.series.samples[c.cur]
+	if c.curValType != chunkenc.ValFloat {
+		panic("iterator is not on a float sample")
+	}
+	s := c.series.floats[c.floatsCur]
 	return s.Timestamp, s.Value
 }
 
-// AtHistogram always returns (0, nil) because there is no support for histogram
-// values yet.
-// TODO(beorn7): Fix that for histogram support in remote storage.
+// AtHistogram implements chunkenc.Iterator
 func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
-	return 0, nil
+	if c.curValType != chunkenc.ValHistogram {
+		panic("iterator is not on an integer histogram sample")
+	}
+	h := c.series.histograms[c.histogramsCur]
+	return h.Timestamp, HistogramProtoToHistogram(h)
 }
 
-// AtFloatHistogram always returns (0, nil) because there is no support for histogram
-// values yet.
-// TODO(beorn7): Fix that for histogram support in remote storage.
+// AtFloatHistogram implements chunkenc.Iterator
 func (c *concreteSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
-	return 0, nil
+	switch c.curValType {
+	case chunkenc.ValHistogram:
+		fh := c.series.histograms[c.histogramsCur]
+		return fh.Timestamp, HistogramProtoToFloatHistogram(fh)
+	case chunkenc.ValFloatHistogram:
+		fh := c.series.histograms[c.histogramsCur]
+		return fh.Timestamp, FloatHistogramProtoToFloatHistogram(fh)
+	default:
+		panic("iterator is not on a histogram sample")
+	}
 }
 
 // AtT implements chunkenc.Iterator.
 func (c *concreteSeriesIterator) AtT() int64 {
-	s := c.series.samples[c.cur]
-	return s.Timestamp
+	if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
+		return c.series.histograms[c.histogramsCur].Timestamp
+	}
+	return c.series.floats[c.floatsCur].Timestamp
 }
 
+const noTS = int64(math.MaxInt64)
+
 // Next implements chunkenc.Iterator.
 func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
-	c.cur++
-	if c.cur < len(c.series.samples) {
-		return chunkenc.ValFloat
+	peekFloatTS := noTS
+	if c.floatsCur+1 < len(c.series.floats) {
+		peekFloatTS = c.series.floats[c.floatsCur+1].Timestamp
 	}
-	return chunkenc.ValNone
-	// TODO(beorn7): Add histogram support.
+	peekHistTS := noTS
+	if c.histogramsCur+1 < len(c.series.histograms) {
+		peekHistTS = c.series.histograms[c.histogramsCur+1].Timestamp
+	}
+	c.curValType = chunkenc.ValNone
+	switch {
+	case peekFloatTS < peekHistTS:
+		c.floatsCur++
+		c.curValType = chunkenc.ValFloat
+	case peekHistTS < peekFloatTS:
+		c.histogramsCur++
+		c.curValType = chunkenc.ValHistogram
+	case peekFloatTS == noTS && peekHistTS == noTS:
+		// This only happens when the iterator is exhausted; we set the cursors off the end to prevent
		// Seek() from returning anything afterwards.
+		c.floatsCur = len(c.series.floats)
+		c.histogramsCur = len(c.series.histograms)
+	default:
+		// Prefer float samples to histogram samples if there's a conflict. We advance the cursor for histograms
+		// anyway otherwise the histogram sample will get selected on the next call to Next().
+		c.floatsCur++
+		c.histogramsCur++
+		c.curValType = chunkenc.ValFloat
+	}
+	return c.curValType
 }
 
 // Err implements chunkenc.Iterator.
@@ -525,8 +625,11 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar {
 
 // HistogramProtoToHistogram extracts a (normal integer) Histogram from the
 // provided proto message. The caller has to make sure that the proto message
-// represents an integer histogram and not a float histogram.
+// represents an integer histogram and not a float histogram, or it panics.
 func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
+	if hp.IsFloatHistogram() {
+		panic("HistogramProtoToHistogram called with a float histogram")
+	}
 	return &histogram.Histogram{
 		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
 		Schema:           hp.Schema,
@@ -541,10 +644,14 @@ func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
 	}
 }
 
-// HistogramProtoToFloatHistogram extracts a (normal integer) Histogram from the
+// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the
 // provided proto message to a Float Histogram. The caller has to make sure that
-// the proto message represents an float histogram and not a integer histogram.
-func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
+// the proto message represents a float histogram and not an integer histogram,
+// or it panics.
+func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
+	if !hp.IsFloatHistogram() {
+		panic("FloatHistogramProtoToFloatHistogram called with an integer histogram")
+	}
 	return &histogram.FloatHistogram{
 		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
 		Schema:           hp.Schema,
@@ -559,6 +666,27 @@ func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogr
 	}
 }
 
+// HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message
+// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a
+// float histogram, or it panics.
+func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
+	if hp.IsFloatHistogram() {
+		panic("HistogramProtoToFloatHistogram called with a float histogram")
+	}
+	return &histogram.FloatHistogram{
+		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
+		Schema:           hp.Schema,
+		ZeroThreshold:    hp.ZeroThreshold,
+		ZeroCount:        float64(hp.GetZeroCountInt()),
+		Count:            float64(hp.GetCountInt()),
+		Sum:              hp.Sum,
+		PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
+		PositiveBuckets:  deltasToCounts(hp.GetPositiveDeltas()),
+		NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
+		NegativeBuckets:  deltasToCounts(hp.GetNegativeDeltas()),
+	}
+}
+
 func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span {
 	spans := make([]histogram.Span, len(s))
 	for i := 0; i < len(s); i++ {
@@ -568,6 +696,16 @@ func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span {
 	return spans
 }
 
+func deltasToCounts(deltas []int64) []float64 {
+	counts := make([]float64, len(deltas))
+	var cur float64
+	for i, d := range deltas {
+		cur += float64(d)
+		counts[i] = cur
+	}
+	return counts
+}
+
 func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram {
 	return prompb.Histogram{
 		Count: &prompb.Histogram_CountInt{CountInt: h.Count},
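The new deltasToCounts helper above turns the delta-encoded integer buckets carried by the proto message into the absolute float counts that histogram.FloatHistogram expects: each element stores the difference from its predecessor, so a running sum recovers the counts. A worked sketch with invented bucket values:

package main

import "fmt"

// deltasToCounts mirrors the helper added above: a running sum over the
// deltas reconstructs the absolute per-bucket counts.
func deltasToCounts(deltas []int64) []float64 {
	counts := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d)
		counts[i] = cur
	}
	return counts
}

func main() {
	// Deltas 5, -2, +3 decode to absolute bucket counts 5, 3, 6.
	fmt.Println(deltasToCounts([]int64{5, -2, 3})) // [5 3 6]
}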

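The reworked concreteSeriesIterator above interleaves two independently sorted slices, float samples and histogram samples, with one cursor each; on a timestamp tie the float sample wins and both cursors advance so the duplicate histogram is skipped. A self-contained sketch of that merge policy on bare timestamps (all names here are invented for illustration):

package main

import "fmt"

const noTS = int64(1<<63 - 1) // sentinel: no more samples on that side

// mergeTimestamps walks two sorted timestamp slices the way Next() does:
// the smaller peeked timestamp wins; on a tie the float side is taken
// while both cursors advance.
func mergeTimestamps(floats, hists []int64) []string {
	var out []string
	f, h := -1, -1
	for {
		peekF, peekH := noTS, noTS
		if f+1 < len(floats) {
			peekF = floats[f+1]
		}
		if h+1 < len(hists) {
			peekH = hists[h+1]
		}
		switch {
		case peekF == noTS && peekH == noTS:
			return out // both sides exhausted
		case peekF < peekH:
			f++
			out = append(out, fmt.Sprintf("float@%d", floats[f]))
		case peekH < peekF:
			h++
			out = append(out, fmt.Sprintf("hist@%d", hists[h]))
		default: // equal timestamps: prefer the float sample
			f++
			h++
			out = append(out, fmt.Sprintf("float@%d", floats[f]))
		}
	}
}

func main() {
	fmt.Println(mergeTimestamps([]int64{1, 3, 5}, []int64{2, 3, 6}))
	// [float@1 hist@2 float@3 float@5 hist@6]
}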

@@ -55,9 +55,10 @@ func (r *ewmaRate) tick() {
 	r.mutex.Lock()
 	defer r.mutex.Unlock()
 
-	if r.init {
+	switch {
+	case r.init:
 		r.lastRate += r.alpha * (instantRate - r.lastRate)
-	} else if newEvents > 0 {
+	case newEvents > 0:
 		r.init = true
 		r.lastRate = instantRate
 	}

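For reference, the restructured tick above is the usual exponentially-weighted moving average update: the first tick with events seeds the rate, and later ticks blend the instant rate in. A tiny numeric sketch (the alpha and rates are invented, and the seeding condition is simplified from the real newEvents > 0 check):

package main

import "fmt"

func main() {
	const alpha = 0.2
	lastRate, init := 0.0, false
	for _, instantRate := range []float64{100, 50, 50} {
		switch {
		case init:
			lastRate += alpha * (instantRate - lastRate) // blend toward the instant rate
		case instantRate > 0:
			init = true
			lastRate = instantRate // first observation seeds the average
		}
		fmt.Printf("%.1f\n", lastRate) // 100.0, then 90.0, then 82.0
	}
}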

@@ -609,7 +609,7 @@ outer:
 
 			t.metrics.enqueueRetriesTotal.Inc()
 			time.Sleep(time.Duration(backoff))
-			backoff = backoff * 2
+			backoff *= 2
 			// It is reasonable to use t.cfg.MaxBackoff here, as if we have hit
 			// the full backoff we are likely waiting for external resources.
 			if backoff > t.cfg.MaxBackoff {
@@ -660,7 +660,7 @@ outer:
 
 			t.metrics.enqueueRetriesTotal.Inc()
 			time.Sleep(time.Duration(backoff))
-			backoff = backoff * 2
+			backoff *= 2
 			if backoff > t.cfg.MaxBackoff {
 				backoff = t.cfg.MaxBackoff
 			}
@@ -707,7 +707,7 @@ outer:
 
 			t.metrics.enqueueRetriesTotal.Inc()
 			time.Sleep(time.Duration(backoff))
-			backoff = backoff * 2
+			backoff *= 2
 			if backoff > t.cfg.MaxBackoff {
 				backoff = t.cfg.MaxBackoff
 			}
@@ -754,7 +754,7 @@ outer:
 
 			t.metrics.enqueueRetriesTotal.Inc()
 			time.Sleep(time.Duration(backoff))
-			backoff = backoff * 2
+			backoff *= 2
 			if backoff > t.cfg.MaxBackoff {
 				backoff = t.cfg.MaxBackoff
 			}
@@ -1030,9 +1030,10 @@ func (t *QueueManager) calculateDesiredShards() int {
 		return t.numShards
 	}
 
-	if numShards > t.cfg.MaxShards {
+	switch {
+	case numShards > t.cfg.MaxShards:
 		numShards = t.cfg.MaxShards
-	} else if numShards < t.cfg.MinShards {
+	case numShards < t.cfg.MinShards:
 		numShards = t.cfg.MinShards
 	}
 	return numShards
@@ -1575,10 +1576,11 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l
 		}
 
 		sleepDuration = backoff
-		if backoffErr.retryAfter > 0 {
+		switch {
+		case backoffErr.retryAfter > 0:
 			sleepDuration = backoffErr.retryAfter
 			level.Info(l).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration)
-		} else if backoffErr.retryAfter < 0 {
+		case backoffErr.retryAfter < 0:
 			level.Debug(l).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism")
 		}

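The queue-manager hunks above are mechanical (backoff = backoff * 2 becomes backoff *= 2), but the policy they touch is capped exponential backoff, optionally overridden by a server-supplied Retry-After. A minimal sketch of the doubling-with-cap part, durations invented:

package main

import (
	"fmt"
	"time"
)

func main() {
	backoff := 30 * time.Millisecond
	maxBackoff := 200 * time.Millisecond
	for i := 0; i < 5; i++ {
		fmt.Println(backoff) // 30ms 60ms 120ms 200ms 200ms
		backoff *= 2
		if backoff > maxBackoff {
			backoff = maxBackoff // never sleep longer than the configured cap
		}
	}
}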

@@ -278,5 +278,5 @@ func (sf seriesFilter) Labels() labels.Labels {
 	b := labels.NewBuilder(sf.Series.Labels())
 	// todo: check if this is too inefficient.
 	b.Del(sf.toFilter...)
-	return b.Labels(labels.EmptyLabels())
+	return b.Labels()
 }


@@ -125,8 +125,8 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 		}
 
 		for _, hp := range ts.Histograms {
-			if hp.GetCountFloat() > 0 || hp.GetZeroCountFloat() > 0 { // It is a float histogram.
-				fhs := HistogramProtoToFloatHistogram(hp)
+			if hp.IsFloatHistogram() {
+				fhs := FloatHistogramProtoToFloatHistogram(hp)
 				_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
 			} else {
 				hs := HistogramProtoToHistogram(hp)


@@ -109,7 +109,7 @@ func (it *listSeriesIterator) Reset(samples Samples) {
 
 func (it *listSeriesIterator) At() (int64, float64) {
 	s := it.samples.Get(it.idx)
-	return s.T(), s.V()
+	return s.T(), s.F()
 }
 
 func (it *listSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
@@ -376,10 +376,17 @@ func (e errChunksIterator) Err() error { return e.err }
 // ExpandSamples iterates over all samples in the iterator, buffering all in slice.
 // Optionally it takes samples constructor, useful when you want to compare sample slices with different
 // sample implementations. if nil, sample type from this package will be used.
-func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) {
+func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) {
 	if newSampleFn == nil {
-		newSampleFn = func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample {
-			return sample{t, v, h, fh}
+		newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample {
+			switch {
+			case h != nil:
+				return hSample{t, h}
+			case fh != nil:
+				return fhSample{t, fh}
+			default:
+				return fSample{t, f}
+			}
 		}
 	}
 
@@ -389,12 +396,12 @@ func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64,
 		case chunkenc.ValNone:
 			return result, iter.Err()
 		case chunkenc.ValFloat:
-			t, v := iter.At()
+			t, f := iter.At()
 			// NaNs can't be compared normally, so substitute for another value.
-			if math.IsNaN(v) {
-				v = -42
+			if math.IsNaN(f) {
+				f = -42
 			}
-			result = append(result, newSampleFn(t, v, nil, nil))
+			result = append(result, newSampleFn(t, f, nil, nil))
 		case chunkenc.ValHistogram:
 			t, h := iter.AtHistogram()
 			result = append(result, newSampleFn(t, 0, h, nil))


@@ -182,7 +182,7 @@ func (b *bstreamReader) readBits(nbits uint8) (uint64, error) {
 		}
 
 		bitmask = (uint64(1) << nbits) - 1
-		v = v | ((b.buffer >> (b.valid - nbits)) & bitmask)
+		v |= ((b.buffer >> (b.valid - nbits)) & bitmask)
 		b.valid -= nbits
 
 		return v, nil
@@ -242,13 +242,13 @@ func (b *bstreamReader) loadNextBuffer(nbits uint8) bool {
 	if b.streamOffset+nbytes == len(b.stream) {
 		// There can be concurrent writes happening on the very last byte
 		// of the stream, so use the copy we took at initialization time.
-		buffer = buffer | uint64(b.last)
+		buffer |= uint64(b.last)
 		// Read up to the byte before
 		skip = 1
 	}
 
 	for i := 0; i < nbytes-skip; i++ {
-		buffer = buffer | (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1)))
+		buffer |= (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1)))
 	}
 
 	b.buffer = buffer

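The bstream hunks are pure |= shorthand, but the line they rewrite is the core of the bit reader: shift the wanted high bits of the 64-bit buffer down, mask the rest off, and OR them into the result. A worked sketch with invented values:

package main

import "fmt"

func main() {
	var (
		buffer uint64 = 0b1011_0000 // bits already loaded, most-significant first
		valid  uint8  = 8           // how many bits in buffer are meaningful
		nbits  uint8  = 3           // how many bits the caller asked for
		v      uint64
	)
	bitmask := (uint64(1) << nbits) - 1
	// Shift the wanted bits down to the bottom, mask the rest, OR them in.
	v |= (buffer >> (valid - nbits)) & bitmask
	valid -= nbits
	fmt.Printf("%03b valid=%d\n", v, valid) // 101 valid=5
}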

@@ -47,20 +47,9 @@ func (e Encoding) String() string {
 	return "<unknown>"
 }
 
-// Chunk encodings for out-of-order chunks.
-// These encodings must be only used by the Head block for its internal bookkeeping.
-const (
-	OutOfOrderMask = 0b10000000
-	EncOOOXOR      = EncXOR | OutOfOrderMask
-)
-
-func IsOutOfOrderChunk(e Encoding) bool {
-	return (e & OutOfOrderMask) != 0
-}
-
 // IsValidEncoding returns true for supported encodings.
 func IsValidEncoding(e Encoding) bool {
-	return e == EncXOR || e == EncOOOXOR || e == EncHistogram || e == EncFloatHistogram
+	return e == EncXOR || e == EncHistogram || e == EncFloatHistogram
 }
 
 // Chunk holds a sequence of sample pairs that can be iterated over and appended to.
@@ -107,7 +96,7 @@ type Iterator interface {
 	// timestamp equal or greater than t. If the current sample found by a
 	// previous `Next` or `Seek` operation already has this property, Seek
 	// has no effect. If a sample has been found, Seek returns the type of
-	// its value. Otherwise, it returns ValNone, after with the iterator is
+	// its value. Otherwise, it returns ValNone, after which the iterator is
 	// exhausted.
 	Seek(t int64) ValueType
 	// At returns the current timestamp/value pair if the value is a float.
@@ -262,7 +251,7 @@ func NewPool() Pool {
 func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
 	switch e {
-	case EncXOR, EncOOOXOR:
+	case EncXOR:
 		c := p.xor.Get().(*XORChunk)
 		c.b.stream = b
 		c.b.count = 0
@@ -283,7 +272,7 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
 func (p *pool) Put(c Chunk) error {
 	switch c.Encoding() {
-	case EncXOR, EncOOOXOR:
+	case EncXOR:
 		xc, ok := c.(*XORChunk)
 		// This may happen often with wrapped chunks. Nothing we can really do about
 		// it but returning an error would cause a lot of allocations again. Thus,
@@ -327,7 +316,7 @@ func (p *pool) Put(c Chunk) error {
 // bytes.
 func FromData(e Encoding, d []byte) (Chunk, error) {
 	switch e {
-	case EncXOR, EncOOOXOR:
+	case EncXOR:
 		return &XORChunk{b: bstream{count: 0, stream: d}}, nil
 	case EncHistogram:
 		return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil


@@ -107,7 +107,7 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) {
 	// To get an appender, we must know the state it would have if we had
 	// appended all existing data from scratch. We iterate through the end
 	// and populate via the iterator's state.
-	for it.Next() == ValFloatHistogram {
+	for it.Next() == ValFloatHistogram { // nolint:revive
 	}
 	if err := it.Err(); err != nil {
 		return nil, err
@@ -785,7 +785,7 @@ func (it *floatHistogramIterator) Next() ValueType {
 			it.err = err
 			return ValNone
 		}
-		it.tDelta = it.tDelta + tDod
+		it.tDelta += tDod
 	it.t += it.tDelta
 
 	if ok := it.readXor(&it.cnt.value, &it.cnt.leading, &it.cnt.trailing); !ok {

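The only substantive change in the Appender hunks here and below is a nolint:revive on the intentionally empty loop body: the iterator is driven to the end purely for its side effects on internal state, which the revive linter would otherwise flag. The pattern in isolation, with a toy iterator invented for illustration:

package main

import "fmt"

type iter struct{ i, n, last int }

// Next advances and records state as a side effect, like the chunk iterators do.
func (it *iter) Next() bool {
	if it.i >= it.n {
		return false
	}
	it.i++
	it.last = it.i
	return true
}

func main() {
	it := &iter{n: 3}
	for it.Next() { // nolint:revive // intentionally empty: drain to reach the end state
	}
	fmt.Println(it.last) // 3: the state an appender would resume from
}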

@@ -126,7 +126,7 @@ func (c *HistogramChunk) Appender() (Appender, error) {
 	// To get an appender, we must know the state it would have if we had
 	// appended all existing data from scratch. We iterate through the end
 	// and populate via the iterator's state.
-	for it.Next() == ValHistogram {
+	for it.Next() == ValHistogram { // nolint:revive
 	}
 	if err := it.Err(); err != nil {
 		return nil, err
@@ -875,7 +875,7 @@ func (it *histogramIterator) Next() ValueType {
 		it.err = err
 		return ValNone
 	}
-	it.tDelta = it.tDelta + tDod
+	it.tDelta += tDod
 	it.t += it.tDelta
 
 	cntDod, err := readVarbitInt(&it.br)
@@ -883,7 +883,7 @@ func (it *histogramIterator) Next() ValueType {
 		it.err = err
 		return ValNone
 	}
-	it.cntDelta = it.cntDelta + cntDod
+	it.cntDelta += cntDod
 	it.cnt = uint64(int64(it.cnt) + it.cntDelta)
 
 	zcntDod, err := readVarbitInt(&it.br)
@@ -891,7 +891,7 @@ func (it *histogramIterator) Next() ValueType {
 		it.err = err
 		return ValNone
 	}
-	it.zCntDelta = it.zCntDelta + zcntDod
+	it.zCntDelta += zcntDod
 	it.zCnt = uint64(int64(it.zCnt) + it.zCntDelta)
 
 	ok := it.readSum()


@@ -122,7 +122,7 @@ func readVarbitInt(b *bstreamReader) (int64, error) {
 		}
 		if bits > (1 << (sz - 1)) {
 			// Or something.
-			bits = bits - (1 << sz)
+			bits -= (1 << sz)
 		}
 		val = int64(bits)
 	}

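The varbit hunk only compacts a subtraction, but the line implements two's-complement sign extension: an sz-bit value whose high bit is set must be shifted down by 2^sz to become the intended negative number. A worked sketch for sz = 3 (values invented):

package main

import "fmt"

func main() {
	var sz uint8 = 3
	// 0b110 read as a raw 3-bit field is 6; as a signed 3-bit value it is -2.
	bits := int64(0b110)
	if bits > (1 << (sz - 1)) {
		bits -= 1 << sz // two's-complement sign extension: 6 - 8 = -2
	}
	fmt.Println(bits) // -2
}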

@@ -99,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) {
 	// To get an appender we must know the state it would have if we had
 	// appended all existing data from scratch.
 	// We iterate through the end and populate via the iterator's state.
-	for it.Next() != ValNone {
+	for it.Next() != ValNone { // nolint:revive
 	}
 	if err := it.Err(); err != nil {
 		return nil, err
@@ -152,26 +152,25 @@ type xorAppender struct {
 	trailing uint8
 }
 
-func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) {
+func (a *xorAppender) AppendHistogram(int64, *histogram.Histogram) {
 	panic("appended a histogram to an xor chunk")
 }
 
-func (a *xorAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) {
+func (a *xorAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) {
 	panic("appended a float histogram to an xor chunk")
 }
 
 func (a *xorAppender) Append(t int64, v float64) {
 	var tDelta uint64
 	num := binary.BigEndian.Uint16(a.b.bytes())
-
-	if num == 0 {
+	switch num {
+	case 0:
 		buf := make([]byte, binary.MaxVarintLen64)
 		for _, b := range buf[:binary.PutVarint(buf, t)] {
 			a.b.writeByte(b)
 		}
 		a.b.writeBits(math.Float64bits(v), 64)
-
-	} else if num == 1 {
+	case 1:
 		tDelta = uint64(t - a.t)
 
 		buf := make([]byte, binary.MaxVarintLen64)
@@ -181,7 +180,7 @@ func (a *xorAppender) Append(t int64, v float64) {
 
 		a.writeVDelta(v)
 
-	} else {
+	default:
 		tDelta = uint64(t - a.t)
 		dod := int64(tDelta - a.tDelta)
 
@@ -321,7 +320,7 @@ func (it *xorIterator) Next() ValueType {
 			return ValNone
 		}
 		it.tDelta = tDelta
-		it.t = it.t + int64(it.tDelta)
+		it.t += int64(it.tDelta)
 
 		return it.readValue()
 	}
@@ -384,7 +383,7 @@ func (it *xorIterator) Next() ValueType {
 	}
 
 	it.tDelta = uint64(int64(it.tDelta) + dod)
-	it.t = it.t + int64(it.tDelta)
+	it.t += int64(it.tDelta)
 
 	return it.readValue()
 }
@@ -506,12 +505,3 @@ func xorRead(br *bstreamReader, value *float64, leading, trailing *uint8) error
 	*value = math.Float64frombits(vbits)
 	return nil
 }
-
-// OOOXORChunk holds a XORChunk and overrides the Encoding() method.
-type OOOXORChunk struct {
-	*XORChunk
-}
-
-func (c *OOOXORChunk) Encoding() Encoding {
-	return EncOOOXOR
-}


@@ -42,6 +42,7 @@ type chunkWriteJob struct {
 	maxt     int64
 	chk      chunkenc.Chunk
 	ref      ChunkDiskMapperRef
+	isOOO    bool
 	callback func(error)
 }
 
@@ -76,7 +77,7 @@ type chunkWriteQueue struct {
 }
 
 // writeChunkF is a function which writes chunks, it is dynamic to allow mocking in tests.
-type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool) error
+type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error
 
 func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChunkF) *chunkWriteQueue {
 	counters := prometheus.NewCounterVec(
@@ -133,7 +134,7 @@ func (c *chunkWriteQueue) start() {
 }
 
 func (c *chunkWriteQueue) processJob(job chunkWriteJob) {
-	err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.cutFile)
+	err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.isOOO, job.cutFile)
 	if job.callback != nil {
 		job.callback(err)
 	}


@@ -273,6 +273,26 @@ func NewChunkDiskMapper(reg prometheus.Registerer, dir string, pool chunkenc.Poo
 	return m, m.openMMapFiles()
 }
 
+// Chunk encodings for out-of-order chunks.
+// These encodings must be only used by the Head block for its internal bookkeeping.
+const (
+	OutOfOrderMask = uint8(0b10000000)
+)
+
+func (cdm *ChunkDiskMapper) ApplyOutOfOrderMask(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
+	enc := uint8(sourceEncoding) | OutOfOrderMask
+	return chunkenc.Encoding(enc)
+}
+
+func (cdm *ChunkDiskMapper) IsOutOfOrderChunk(e chunkenc.Encoding) bool {
+	return (uint8(e) & OutOfOrderMask) != 0
+}
+
+func (cdm *ChunkDiskMapper) RemoveMasks(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
+	restored := uint8(sourceEncoding) & (^OutOfOrderMask)
+	return chunkenc.Encoding(restored)
+}
+
 // openMMapFiles opens all files within dir for mmapping.
 func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) {
 	cdm.mmappedChunkFiles = map[int]*mmappedChunkFile{}
@@ -403,17 +423,17 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro
 
 // WriteChunk writes the chunk to the disk.
 // The returned chunk ref is the reference from where the chunk encoding starts for the chunk.
-func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
+func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, isOOO bool, callback func(err error)) (chkRef ChunkDiskMapperRef) {
 	// cdm.evtlPosMtx must be held to serialize the calls to cdm.evtlPos.getNextChunkRef() and the writing of the chunk (either with or without queue).
 	cdm.evtlPosMtx.Lock()
 	defer cdm.evtlPosMtx.Unlock()
 	ref, cutFile := cdm.evtlPos.getNextChunkRef(chk)
 
 	if cdm.writeQueue != nil {
-		return cdm.writeChunkViaQueue(ref, cutFile, seriesRef, mint, maxt, chk, callback)
+		return cdm.writeChunkViaQueue(ref, isOOO, cutFile, seriesRef, mint, maxt, chk, callback)
 	}
 
-	err := cdm.writeChunk(seriesRef, mint, maxt, chk, ref, cutFile)
+	err := cdm.writeChunk(seriesRef, mint, maxt, chk, ref, isOOO, cutFile)
 	if callback != nil {
 		callback(err)
 	}
@@ -421,7 +441,7 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64
 	return ref
 }
 
-func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, cutFile bool, seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
+func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, isOOO, cutFile bool, seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
 	var err error
 	if callback != nil {
 		defer func() {
@@ -438,13 +458,14 @@ func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, cutFile b
 		maxt:      maxt,
 		chk:       chk,
 		ref:       ref,
+		isOOO:     isOOO,
 		callback:  callback,
 	})
 
 	return ref
 }
 
-func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) (err error) {
+func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) (err error) {
 	cdm.writePathMtx.Lock()
 	defer cdm.writePathMtx.Unlock()
 
@@ -476,7 +497,11 @@ func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64
 	bytesWritten += MintMaxtSize
 	binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(maxt))
 	bytesWritten += MintMaxtSize
-	cdm.byteBuf[bytesWritten] = byte(chk.Encoding())
+	enc := chk.Encoding()
+	if isOOO {
+		enc = cdm.ApplyOutOfOrderMask(enc)
+	}
+	cdm.byteBuf[bytesWritten] = byte(enc)
 	bytesWritten += ChunkEncodingSize
 	n := binary.PutUvarint(cdm.byteBuf[bytesWritten:], uint64(len(chk.Bytes())))
 	bytesWritten += n
@@ -696,7 +721,9 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error
 
 	// Encoding.
 	chkEnc := mmapFile.byteSlice.Range(chkStart, chkStart+ChunkEncodingSize)[0]
-
+	sourceChkEnc := chunkenc.Encoding(chkEnc)
+	// Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding.
+	chkEnc = byte(cdm.RemoveMasks(sourceChkEnc))
 	// Data length.
 	// With the minimum chunk length this should never cause us reading
 	// over the end of the slice.
@@ -762,7 +789,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error
 // and runs the provided function with information about each chunk. It returns on the first error encountered.
 // NOTE: This method needs to be called at least once after creating ChunkDiskMapper
 // to set the maxt of all the file.
-func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error) (err error) {
+func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error) (err error) {
 	cdm.writePathMtx.Lock()
 	defer cdm.writePathMtx.Unlock()
 
@@ -860,8 +887,10 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu
 			if maxt > mmapFile.maxt {
 				mmapFile.maxt = maxt
 			}
-
-			if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc); err != nil {
+			isOOO := cdm.IsOutOfOrderChunk(chkEnc)
+			// Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding.
+			chkEnc = cdm.RemoveMasks(chkEnc)
+			if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc, isOOO); err != nil {
 				if cerr, ok := err.(*CorruptionErr); ok {
 					cerr.Dir = cdm.dir.Name()
 					cerr.FileIndex = segID
@@ -970,9 +999,10 @@ func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error {
 	cdm.readPathMtx.RLock()
 	lastSeq := 0
 	for seg := range cdm.mmappedChunkFiles {
-		if seg >= cerr.FileIndex {
+		switch {
+		case seg >= cerr.FileIndex:
 			segs = append(segs, seg)
-		} else if seg > lastSeq {
+		case seg > lastSeq:
 			lastSeq = seg
 		}
 	}

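The head_chunks.go additions above move the out-of-order marker out of chunkenc (where EncOOOXOR is deleted in the earlier hunks) and into the top bit of the on-disk encoding byte, applied by ApplyOutOfOrderMask on write and stripped by RemoveMasks on read. A round-trip sketch of the bit arithmetic (EncXOR is 1 in the real chunkenc package; the rest is illustrative):

package main

import "fmt"

const outOfOrderMask = uint8(0b10000000)

func main() {
	encXOR := uint8(1) // stand-in for chunkenc.EncXOR
	onDisk := encXOR | outOfOrderMask
	fmt.Printf("stored=%08b\n", onDisk)                 // 10000001: top bit marks OOO
	fmt.Println((onDisk & outOfOrderMask) != 0)         // true: IsOutOfOrderChunk
	fmt.Printf("restored=%d\n", onDisk&^outOfOrderMask) // 1: RemoveMasks yields EncXOR again
}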

@@ -44,7 +44,7 @@ func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
 	curRange := minSize
 	for i := 0; i < steps; i++ {
 		ranges = append(ranges, curRange)
-		curRange = curRange * int64(stepSize)
+		curRange *= int64(stepSize)
 	}
 
 	return ranges
@@ -75,7 +75,7 @@ type Compactor interface {
 
 // LeveledCompactor implements the Compactor interface.
 type LeveledCompactor struct {
-	metrics   *compactorMetrics
+	metrics   *CompactorMetrics
 	logger    log.Logger
 	ranges    []int64
 	chunkPool chunkenc.Pool
@@ -84,47 +84,47 @@ type LeveledCompactor struct {
 	mergeFunc storage.VerticalChunkSeriesMergeFunc
 }
 
-type compactorMetrics struct {
-	ran               prometheus.Counter
-	populatingBlocks  prometheus.Gauge
-	overlappingBlocks prometheus.Counter
-	duration          prometheus.Histogram
-	chunkSize         prometheus.Histogram
-	chunkSamples      prometheus.Histogram
-	chunkRange        prometheus.Histogram
+type CompactorMetrics struct {
+	Ran               prometheus.Counter
+	PopulatingBlocks  prometheus.Gauge
+	OverlappingBlocks prometheus.Counter
+	Duration          prometheus.Histogram
+	ChunkSize         prometheus.Histogram
+	ChunkSamples      prometheus.Histogram
+	ChunkRange        prometheus.Histogram
 }
 
-func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics {
-	m := &compactorMetrics{}
+func newCompactorMetrics(r prometheus.Registerer) *CompactorMetrics {
+	m := &CompactorMetrics{}
 
-	m.ran = prometheus.NewCounter(prometheus.CounterOpts{
+	m.Ran = prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "prometheus_tsdb_compactions_total",
 		Help: "Total number of compactions that were executed for the partition.",
 	})
-	m.populatingBlocks = prometheus.NewGauge(prometheus.GaugeOpts{
+	m.PopulatingBlocks = prometheus.NewGauge(prometheus.GaugeOpts{
 		Name: "prometheus_tsdb_compaction_populating_block",
 		Help: "Set to 1 when a block is currently being written to the disk.",
 	})
-	m.overlappingBlocks = prometheus.NewCounter(prometheus.CounterOpts{
+	m.OverlappingBlocks = prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "prometheus_tsdb_vertical_compactions_total",
 		Help: "Total number of compactions done on overlapping blocks.",
 	})
-	m.duration = prometheus.NewHistogram(prometheus.HistogramOpts{
+	m.Duration = prometheus.NewHistogram(prometheus.HistogramOpts{
 		Name:    "prometheus_tsdb_compaction_duration_seconds",
 		Help:    "Duration of compaction runs",
 		Buckets: prometheus.ExponentialBuckets(1, 2, 14),
 	})
-	m.chunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{
+	m.ChunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{
 		Name:    "prometheus_tsdb_compaction_chunk_size_bytes",
 		Help:    "Final size of chunks on their first compaction",
 		Buckets: prometheus.ExponentialBuckets(32, 1.5, 12),
 	})
-	m.chunkSamples = prometheus.NewHistogram(prometheus.HistogramOpts{
+	m.ChunkSamples = prometheus.NewHistogram(prometheus.HistogramOpts{
 		Name:    "prometheus_tsdb_compaction_chunk_samples",
 		Help:    "Final number of samples on their first compaction",
 		Buckets: prometheus.ExponentialBuckets(4, 1.5, 12),
 	})
-	m.chunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{
+	m.ChunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{
 		Name:    "prometheus_tsdb_compaction_chunk_range_seconds",
 		Help:    "Final time range of chunks on their first compaction",
 		Buckets: prometheus.ExponentialBuckets(100, 4, 10),
@@ -132,13 +132,13 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics {
 
 	if r != nil {
 		r.MustRegister(
-			m.ran,
-			m.populatingBlocks,
-			m.overlappingBlocks,
-			m.duration,
-			m.chunkRange,
-			m.chunkSamples,
-			m.chunkSize,
+			m.Ran,
+			m.PopulatingBlocks,
+			m.OverlappingBlocks,
+			m.Duration,
+			m.ChunkRange,
+			m.ChunkSamples,
+			m.ChunkSize,
 		)
 	}
 	return m
@@ -392,6 +392,10 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
 // Compact creates a new block in the compactor's directory from the blocks in the
 // provided directories.
 func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) {
+	return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{})
+}
+
+func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) (uid ulid.ULID, err error) {
 	var (
 		blocks []BlockReader
 		bs     []*Block
@@ -435,7 +439,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u
 	uid = ulid.MustNew(ulid.Now(), rand.Reader)
 
 	meta := CompactBlockMetas(uid, metas...)
-	err = c.write(dest, meta, blocks...)
+	err = c.write(dest, meta, blockPopulator, blocks...)
 	if err == nil {
 		if meta.Stats.NumSamples == 0 {
 			for _, b := range bs {
@@ -471,7 +475,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u
 	}
 
 	errs := tsdb_errors.NewMulti(err)
-	if err != context.Canceled {
+	if !errors.Is(err, context.Canceled) {
 		for _, b := range bs {
 			if err := b.setCompactionFailed(); err != nil {
 				errs.Add(errors.Wrapf(err, "setting compaction failed for block: %s", b.Dir()))
@@ -501,7 +505,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, p
 		}
 	}
 
-	err := c.write(dest, meta, b)
+	err := c.write(dest, meta, DefaultBlockPopulator{}, b)
 	if err != nil {
 		return uid, err
 	}
@@ -546,7 +550,7 @@ func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error {
 }
 
 // write creates a new block that is the union of the provided blocks into dir.
-func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockReader) (err error) {
+func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator BlockPopulator, blocks ...BlockReader) (err error) {
 	dir := filepath.Join(dest, meta.ULID.String())
 	tmp := dir + tmpForCreationBlockDirSuffix
 	var closers []io.Closer
@@ -557,8 +561,8 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
 		if err := os.RemoveAll(tmp); err != nil {
 			level.Error(c.logger).Log("msg", "removed tmp folder after failed compaction", "err", err.Error())
 		}
-		c.metrics.ran.Inc()
-		c.metrics.duration.Observe(time.Since(t).Seconds())
+		c.metrics.Ran.Inc()
+		c.metrics.Duration.Observe(time.Since(t).Seconds())
 	}(time.Now())
 
 	if err = os.RemoveAll(tmp); err != nil {
@@ -582,9 +586,9 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
 	if meta.Compaction.Level == 1 {
 		chunkw = &instrumentedChunkWriter{
 			ChunkWriter: chunkw,
-			size:        c.metrics.chunkSize,
-			samples:     c.metrics.chunkSamples,
-			trange:      c.metrics.chunkRange,
+			size:        c.metrics.ChunkSize,
+			samples:     c.metrics.ChunkSamples,
+			trange:      c.metrics.ChunkRange,
 		}
 	}
 
@@ -594,7 +598,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
 	}
 	closers = append(closers, indexw)
 
-	if err := c.populateBlock(blocks, meta, indexw, chunkw); err != nil {
+	if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil {
 		return errors.Wrap(err, "populate block")
 	}
 
@@ -659,10 +663,16 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
 	return nil
 }
 
-// populateBlock fills the index and chunk writers with new data gathered as the union
+type BlockPopulator interface {
+	PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error
+}
+
+type DefaultBlockPopulator struct{}
+
+// PopulateBlock fills the index and chunk writers with new data gathered as the union
 // of the provided blocks. It returns meta information for the new block.
 // It expects sorted blocks input by mint.
-func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) {
+func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) {
 	if len(blocks) == 0 {
 		return errors.New("cannot populate block from no readers")
 	}
@@ -679,23 +689,23 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
 			errs.Add(errors.Wrap(cerr, "close"))
 		}
 		err = errs.Err()
-		c.metrics.populatingBlocks.Set(0)
+		metrics.PopulatingBlocks.Set(0)
 	}()
-	c.metrics.populatingBlocks.Set(1)
+	metrics.PopulatingBlocks.Set(1)
 
 	globalMaxt := blocks[0].Meta().MaxTime
 	for i, b := range blocks {
 		select {
-		case <-c.ctx.Done():
-			return c.ctx.Err()
+		case <-ctx.Done():
+			return ctx.Err()
 		default:
 		}
 
 		if !overlapping {
 			if i > 0 && b.Meta().MinTime < globalMaxt {
-				c.metrics.overlappingBlocks.Inc()
+				metrics.OverlappingBlocks.Inc()
 				overlapping = true
-				level.Info(c.logger).Log("msg", "Found overlapping blocks during compaction", "ulid", meta.ULID)
+				level.Info(logger).Log("msg", "Found overlapping blocks during compaction", "ulid", meta.ULID)
 			}
 			if b.Meta().MaxTime > globalMaxt {
 				globalMaxt = b.Meta().MaxTime
@@ -727,7 +737,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
 		}
 		all = indexr.SortedPostings(all)
 		// Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp.
-		sets = append(sets, newBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false))
+		sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false))
 		syms := indexr.Symbols()
 		if i == 0 {
 			symbols = syms
@@ -755,14 +765,14 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
 	if len(sets) > 1 {
 		// Merge series using specified chunk series merger.
 		// The default one is the compacting series merger.
-		set = storage.NewMergeChunkSeriesSet(sets, c.mergeFunc)
+		set = storage.NewMergeChunkSeriesSet(sets, mergeFunc)
 	}
 
 	// Iterate over all sorted chunk series.
 	for set.Next() {
 		select {
-		case <-c.ctx.Done():
-			return c.ctx.Err()
+		case <-ctx.Done():
+			return ctx.Err()
 		default:
 		}
 		s := set.At()
@@ -797,7 +807,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
 		}
 
 		for _, chk := range chks {
-			if err := c.chunkPool.Put(chk.Chunk); err != nil {
+			if err := chunkPool.Put(chk.Chunk); err != nil {
 				return errors.Wrap(err, "put chunk")
 			}
 		}

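CompactWithBlockPopulator above makes block population pluggable: PopulateBlock now receives the context, metrics, logger, chunk pool and merge function as arguments instead of reading LeveledCompactor fields, so a caller can supply its own populator. A hedged sketch of a wrapper that delegates to the default populator and logs the elapsed time; only the interface shape comes from this diff, everything else here is invented:

package main

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// timedPopulator wraps the stock populator and logs how long population took.
// Hypothetical type for illustration; it satisfies tsdb.BlockPopulator.
type timedPopulator struct {
	inner tsdb.DefaultBlockPopulator
}

func (p timedPopulator) PopulateBlock(ctx context.Context, metrics *tsdb.CompactorMetrics,
	logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc,
	blocks []tsdb.BlockReader, meta *tsdb.BlockMeta, indexw tsdb.IndexWriter, chunkw tsdb.ChunkWriter,
) error {
	start := time.Now()
	err := p.inner.PopulateBlock(ctx, metrics, logger, chunkPool, mergeFunc, blocks, meta, indexw, chunkw)
	level.Info(logger).Log("msg", "populated block", "ulid", meta.ULID, "took", time.Since(start))
	return err
}

func main() {}

Such a type could then be passed as c.CompactWithBlockPopulator(dest, dirs, open, timedPopulator{}) in place of plain Compact; a sketch under those assumptions, not an API guarantee.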

@@ -260,7 +260,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
 		Help: "Size of symbol table in memory for loaded blocks",
 	}, func() float64 {
 		db.mtx.RLock()
-		blocks := db.blocks[:]
+		blocks := db.blocks
 		db.mtx.RUnlock()
 		symTblSize := uint64(0)
 		for _, b := range blocks {
@@ -828,11 +828,13 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
 			if err := wbl.Repair(initErr); err != nil {
 				return nil, errors.Wrap(err, "repair corrupted OOO WAL")
 			}
+			level.Info(db.logger).Log("msg", "Successfully repaired OOO WAL")
 		} else {
 			level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr)
 			if err := wal.Repair(initErr); err != nil {
 				return nil, errors.Wrap(err, "repair corrupted WAL")
 			}
+			level.Info(db.logger).Log("msg", "Successfully repaired WAL")
 		}
 	}
 
@@ -961,10 +963,11 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
 	// Create WBL if it was not present and if OOO is enabled with WAL enabled.
 	var wblog *wlog.WL
 	var err error
-	if db.head.wbl != nil {
+	switch {
+	case db.head.wbl != nil:
 		// The existing WBL from the disk might have been replayed while OOO was disabled.
 		wblog = db.head.wbl
-	} else if !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0 {
+	case !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0:
 		segmentSize := wlog.DefaultSegmentSize
 		// Wal is set to a custom size.
 		if db.opts.WALSegmentSize > 0 {
@@ -1184,7 +1187,7 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID
 		}
 	}()
 
-	for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t = t + blockSize {
+	for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize {
 		mint, maxt := t, t+blockSize
 		// Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes.
 		uid, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, nil)
@@ -1506,7 +1509,7 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc
 	blocksSize := db.Head().Size()
 	for i, block := range blocks {
 		blocksSize += block.Size()
-		if blocksSize > int64(db.opts.MaxBytes) {
+		if blocksSize > db.opts.MaxBytes {
 			// Add this and all following blocks for deletion.
 			for _, b := range blocks[i:] {
 				deletable[b.meta.ULID] = struct{}{}
@@ -1530,10 +1533,11 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
 		}
 
 		toDelete := filepath.Join(db.dir, ulid.String())
-		if _, err := os.Stat(toDelete); os.IsNotExist(err) {
+		switch _, err := os.Stat(toDelete); {
+		case os.IsNotExist(err):
 			// Noop.
 			continue
-		} else if err != nil {
+		case err != nil:
 			return errors.Wrapf(err, "stat dir %v", toDelete)
 		}


@ -16,6 +16,7 @@ package errors
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"io" "io"
) )
@ -79,6 +80,19 @@ func (es nonNilMultiError) Error() string {
return buf.String() return buf.String()
} }
// Is attempts to match the provided error against errors in the error list.
//
// This function allows errors.Is to traverse the values stored in the MultiError.
// It returns true if any of the errors in the list match the target.
func (es nonNilMultiError) Is(target error) bool {
for _, err := range es.errs {
if errors.Is(err, target) {
return true
}
}
return false
}
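
With this Is method in place, errors.Is can see through the list. A minimal caller sketch, assuming the package's existing NewMulti/Add/Err API (the sentinel error is invented for illustration):

	package main

	import (
		"errors"
		"fmt"

		tsdberrors "github.com/prometheus/prometheus/tsdb/errors"
	)

	var errSentinel = errors.New("sentinel")

	func main() {
		multi := tsdberrors.NewMulti()
		multi.Add(errors.New("some other failure"))
		multi.Add(fmt.Errorf("op failed: %w", errSentinel))

		// Before this change errors.Is stopped at the multi-error itself;
		// now it traverses every stored error.
		fmt.Println(errors.Is(multi.Err(), errSentinel)) // true
	}
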
// CloseAll closes all given closers while recording error in MultiError. // CloseAll closes all given closers while recording error in MultiError.
func CloseAll(cs []io.Closer) error { func CloseAll(cs []io.Closer) error {
errs := NewMulti() errs := NewMulti()

View file

@ -115,17 +115,17 @@ func NewExemplarMetrics(reg prometheus.Registerer) *ExemplarMetrics {
// 1GB of extra memory, accounting for the fact that this is heap allocated space. // 1GB of extra memory, accounting for the fact that this is heap allocated space.
// If len <= 0, then the exemplar storage is essentially a noop storage but can later be // If len <= 0, then the exemplar storage is essentially a noop storage but can later be
// resized to store exemplars. // resized to store exemplars.
func NewCircularExemplarStorage(len int64, m *ExemplarMetrics) (ExemplarStorage, error) { func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStorage, error) {
if len < 0 { if length < 0 {
len = 0 length = 0
} }
c := &CircularExemplarStorage{ c := &CircularExemplarStorage{
exemplars: make([]*circularBufferEntry, len), exemplars: make([]*circularBufferEntry, length),
index: make(map[string]*indexEntry, len/estimatedExemplarsPerSeries), index: make(map[string]*indexEntry, length/estimatedExemplarsPerSeries),
metrics: m, metrics: m,
} }
c.metrics.maxExemplars.Set(float64(len)) c.metrics.maxExemplars.Set(float64(length))
return c, nil return c, nil
} }
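
A brief usage sketch of the renamed constructor; the registry wiring and capacity are illustrative only:

	m := NewExemplarMetrics(prometheus.NewRegistry())
	es, err := NewCircularExemplarStorage(100_000, m) // room for 100k exemplars
	if err != nil {
		panic(err)
	}
	_ = es // with length <= 0 this would be a noop storage, resizable later
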
@ -151,7 +151,7 @@ func (ce *CircularExemplarStorage) Querier(_ context.Context) (storage.ExemplarQ
func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) { func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) {
ret := make([]exemplar.QueryResult, 0) ret := make([]exemplar.QueryResult, 0)
if len(ce.exemplars) <= 0 { if len(ce.exemplars) == 0 {
return ret, nil return ret, nil
} }
@ -219,7 +219,7 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.
// Not thread safe. The append parameter tells us whether this is an external validation, or internal // as a result of an AddExemplar call, in which case we should update any relevant metrics.
// as a result of an AddExemplar call, in which case we should update any relevant metrics. // as a result of an AddExemplar call, in which case we should update any relevant metrics.
func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, append bool) error { func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, append bool) error {
if len(ce.exemplars) <= 0 { if len(ce.exemplars) == 0 {
return storage.ErrExemplarsDisabled return storage.ErrExemplarsDisabled
} }
@ -334,7 +334,7 @@ func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry) {
} }
func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error { func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
if len(ce.exemplars) <= 0 { if len(ce.exemplars) == 0 {
return storage.ErrExemplarsDisabled return storage.ErrExemplarsDisabled
} }

View file

@ -44,6 +44,7 @@ import (
"github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/zeropool"
) )
var ( var (
@ -83,13 +84,13 @@ type Head struct {
exemplarMetrics *ExemplarMetrics exemplarMetrics *ExemplarMetrics
exemplars ExemplarStorage exemplars ExemplarStorage
logger log.Logger logger log.Logger
appendPool sync.Pool appendPool zeropool.Pool[[]record.RefSample]
exemplarsPool sync.Pool exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef]
histogramsPool sync.Pool histogramsPool zeropool.Pool[[]record.RefHistogramSample]
floatHistogramsPool sync.Pool floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample]
metadataPool sync.Pool metadataPool zeropool.Pool[[]record.RefMetadata]
seriesPool sync.Pool seriesPool zeropool.Pool[[]*memSeries]
bytesPool sync.Pool bytesPool zeropool.Pool[[]byte]
memChunkPool sync.Pool memChunkPool sync.Pool
// All series addressable by their ID or hash. // All series addressable by their ID or hash.
@ -573,7 +574,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
func (h *Head) Init(minValidTime int64) error { func (h *Head) Init(minValidTime int64) error {
h.minValidTime.Store(minValidTime) h.minValidTime.Store(minValidTime)
defer func() { defer func() {
h.postings.EnsureOrder() h.postings.EnsureOrder(h.opts.WALReplayConcurrency)
}() }()
defer h.gc() // After loading the wal remove the obsolete data from the head. defer h.gc() // After loading the wal remove the obsolete data from the head.
defer func() { defer func() {
@ -590,6 +591,7 @@ func (h *Head) Init(minValidTime int64) error {
snapIdx, snapOffset := -1, 0 snapIdx, snapOffset := -1, 0
refSeries := make(map[chunks.HeadSeriesRef]*memSeries) refSeries := make(map[chunks.HeadSeriesRef]*memSeries)
snapshotLoaded := false
if h.opts.EnableMemorySnapshotOnShutdown { if h.opts.EnableMemorySnapshotOnShutdown {
level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot") level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot")
// If there are any WAL files, there should be at least one WAL file with an index that is current or newer // If there are any WAL files, there should be at least one WAL file with an index that is current or newer
@ -619,6 +621,7 @@ func (h *Head) Init(minValidTime int64) error {
var err error var err error
snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot() snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot()
if err == nil { if err == nil {
snapshotLoaded = true
level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String()) level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String())
} }
if err != nil { if err != nil {
@ -636,26 +639,36 @@ func (h *Head) Init(minValidTime int64) error {
} }
mmapChunkReplayStart := time.Now() mmapChunkReplayStart := time.Now()
mmappedChunks, oooMmappedChunks, lastMmapRef, err := h.loadMmappedChunks(refSeries) var (
if err != nil { mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
// TODO(codesome): clear out all m-map chunks here for refSeries. oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) lastMmapRef chunks.ChunkDiskMapperRef
if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok { err error
h.metrics.mmapChunkCorruptionTotal.Inc() )
} if snapshotLoaded || h.wal != nil {
// If snapshot was not loaded and if there is no WAL, then m-map chunks will be discarded
// Discard snapshot data since we need to replay the WAL for the missed m-map chunks data. // anyway. So we only load m-map chunks when it won't be discarded.
snapIdx, snapOffset = -1, 0 mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.loadMmappedChunks(refSeries)
// If this fails, data will be recovered from WAL.
// Hence we wont lose any data (given WAL is not corrupt).
mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.removeCorruptedMmappedChunks(err)
if err != nil { if err != nil {
return err // TODO(codesome): clear out all m-map chunks here for refSeries.
level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err)
if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok {
h.metrics.mmapChunkCorruptionTotal.Inc()
}
// Discard snapshot data since we need to replay the WAL for the missed m-map chunks data.
snapIdx, snapOffset = -1, 0
// If this fails, data will be recovered from WAL.
// Hence we wont lose any data (given WAL is not corrupt).
mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.removeCorruptedMmappedChunks(err)
if err != nil {
return err
}
} }
level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String())
} }
level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String())
if h.wal == nil { if h.wal == nil {
level.Info(h.logger).Log("msg", "WAL not found") level.Info(h.logger).Log("msg", "WAL not found")
return nil return nil
@ -784,10 +797,9 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
mmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{} mmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{}
oooMmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{} oooMmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{}
var lastRef, secondLastRef chunks.ChunkDiskMapperRef var lastRef, secondLastRef chunks.ChunkDiskMapperRef
if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error { if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error {
secondLastRef = lastRef secondLastRef = lastRef
lastRef = chunkRef lastRef = chunkRef
isOOO := chunkenc.IsOutOfOrderChunk(encoding)
if !isOOO && maxt < h.minValidTime.Load() { if !isOOO && maxt < h.minValidTime.Load() {
return nil return nil
} }
@ -824,6 +836,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
numSamples: numSamples, numSamples: numSamples,
}) })
h.updateMinOOOMaxOOOTime(mint, maxt)
return nil return nil
} }
@ -1257,6 +1270,10 @@ func (h *Head) truncateOOO(lastWBLFile int, minOOOMmapRef chunks.ChunkDiskMapper
} }
} }
if h.wbl == nil {
return nil
}
return h.wbl.Truncate(lastWBLFile) return h.wbl.Truncate(lastWBLFile)
} }
@ -1436,7 +1453,7 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
} }
} }
for _, s := range stones { for _, s := range stones {
h.tombstones.AddInterval(storage.SeriesRef(s.Ref), s.Intervals[0]) h.tombstones.AddInterval(s.Ref, s.Intervals[0])
} }
return nil return nil
@ -1847,7 +1864,7 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu
type sample struct { type sample struct {
t int64 t int64
v float64 f float64
h *histogram.Histogram h *histogram.Histogram
fh *histogram.FloatHistogram fh *histogram.FloatHistogram
} }
@ -1857,7 +1874,7 @@ func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHi
} }
func (s sample) T() int64 { return s.t } func (s sample) T() int64 { return s.t }
func (s sample) V() float64 { return s.v } func (s sample) F() float64 { return s.f }
func (s sample) H() *histogram.Histogram { return s.h } func (s sample) H() *histogram.Histogram { return s.h }
func (s sample) FH() *histogram.FloatHistogram { return s.fh } func (s sample) FH() *histogram.FloatHistogram { return s.fh }

View file

@ -199,11 +199,10 @@ func (h *Head) getAppendBuffer() []record.RefSample {
if b == nil { if b == nil {
return make([]record.RefSample, 0, 512) return make([]record.RefSample, 0, 512)
} }
return b.([]record.RefSample) return b
} }
func (h *Head) putAppendBuffer(b []record.RefSample) { func (h *Head) putAppendBuffer(b []record.RefSample) {
//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
h.appendPool.Put(b[:0]) h.appendPool.Put(b[:0])
} }
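
zeropool.Pool (used throughout this file) is a generic wrapper around sync.Pool, which is why the Get type assertions and the SA6002 nolint comments disappear below. A rough sketch of the idea, not the actual util/zeropool implementation:

	package zeropool

	import "sync"

	// Pool is a generic pool. Storing *T rather than T avoids the extra
	// allocation that staticcheck SA6002 warns about when putting
	// non-pointer values (like slice headers) into a sync.Pool.
	type Pool[T any] struct {
		p sync.Pool
	}

	// Get returns a pooled value, or the zero value of T if the pool is empty.
	func (z *Pool[T]) Get() T {
		v, ok := z.p.Get().(*T)
		if !ok {
			var zero T
			return zero
		}
		return *v
	}

	// Put stores x back into the pool.
	func (z *Pool[T]) Put(x T) {
		z.p.Put(&x)
	}
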
@ -212,7 +211,7 @@ func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef {
if b == nil { if b == nil {
return make([]exemplarWithSeriesRef, 0, 512) return make([]exemplarWithSeriesRef, 0, 512)
} }
return b.([]exemplarWithSeriesRef) return b
} }
func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) { func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) {
@ -220,7 +219,6 @@ func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) {
return return
} }
//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
h.exemplarsPool.Put(b[:0]) h.exemplarsPool.Put(b[:0])
} }
@ -229,11 +227,10 @@ func (h *Head) getHistogramBuffer() []record.RefHistogramSample {
if b == nil { if b == nil {
return make([]record.RefHistogramSample, 0, 512) return make([]record.RefHistogramSample, 0, 512)
} }
return b.([]record.RefHistogramSample) return b
} }
func (h *Head) putHistogramBuffer(b []record.RefHistogramSample) { func (h *Head) putHistogramBuffer(b []record.RefHistogramSample) {
//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
h.histogramsPool.Put(b[:0]) h.histogramsPool.Put(b[:0])
} }
@ -242,11 +239,10 @@ func (h *Head) getFloatHistogramBuffer() []record.RefFloatHistogramSample {
if b == nil { if b == nil {
return make([]record.RefFloatHistogramSample, 0, 512) return make([]record.RefFloatHistogramSample, 0, 512)
} }
return b.([]record.RefFloatHistogramSample) return b
} }
func (h *Head) putFloatHistogramBuffer(b []record.RefFloatHistogramSample) { func (h *Head) putFloatHistogramBuffer(b []record.RefFloatHistogramSample) {
//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
h.floatHistogramsPool.Put(b[:0]) h.floatHistogramsPool.Put(b[:0])
} }
@ -255,11 +251,10 @@ func (h *Head) getMetadataBuffer() []record.RefMetadata {
if b == nil { if b == nil {
return make([]record.RefMetadata, 0, 512) return make([]record.RefMetadata, 0, 512)
} }
return b.([]record.RefMetadata) return b
} }
func (h *Head) putMetadataBuffer(b []record.RefMetadata) { func (h *Head) putMetadataBuffer(b []record.RefMetadata) {
//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
h.metadataPool.Put(b[:0]) h.metadataPool.Put(b[:0])
} }
@ -268,11 +263,10 @@ func (h *Head) getSeriesBuffer() []*memSeries {
if b == nil { if b == nil {
return make([]*memSeries, 0, 512) return make([]*memSeries, 0, 512)
} }
return b.([]*memSeries) return b
} }
func (h *Head) putSeriesBuffer(b []*memSeries) { func (h *Head) putSeriesBuffer(b []*memSeries) {
//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
h.seriesPool.Put(b[:0]) h.seriesPool.Put(b[:0])
} }
@ -281,11 +275,10 @@ func (h *Head) getBytesBuffer() []byte {
if b == nil { if b == nil {
return make([]byte, 0, 1024) return make([]byte, 0, 1024)
} }
return b.([]byte) return b
} }
func (h *Head) putBytesBuffer(b []byte) { func (h *Head) putBytesBuffer(b []byte) {
//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
h.bytesPool.Put(b[:0]) h.bytesPool.Put(b[:0])
} }
@ -351,9 +344,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
} }
if value.IsStaleNaN(v) { if value.IsStaleNaN(v) {
if s.lastHistogramValue != nil { switch {
case s.lastHistogramValue != nil:
return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil) return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
} else if s.lastFloatHistogramValue != nil { case s.lastFloatHistogramValue != nil:
return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v}) return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
} }
} }
@ -437,7 +431,7 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi
return false, headMaxt - t, storage.ErrOutOfOrderSample return false, headMaxt - t, storage.ErrOutOfOrderSample
} }
// appendableHistogram checks whether the given sample is valid for appending to the series. // appendableHistogram checks whether the given histogram is valid for appending to the series.
func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error {
c := s.head() c := s.head()
if c == nil { if c == nil {
@ -459,7 +453,7 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error {
return nil return nil
} }
// appendableFloatHistogram checks whether the given sample is valid for appending to the series. // appendableFloatHistogram checks whether the given float histogram is valid for appending to the series.
func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram) error { func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram) error {
c := s.head() c := s.head()
if c == nil { if c == nil {
@ -559,9 +553,10 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
return 0, err return 0, err
} }
if created { if created {
if h != nil { switch {
case h != nil:
s.lastHistogramValue = &histogram.Histogram{} s.lastHistogramValue = &histogram.Histogram{}
} else if fh != nil { case fh != nil:
s.lastFloatHistogramValue = &histogram.FloatHistogram{} s.lastFloatHistogramValue = &histogram.FloatHistogram{}
} }
a.series = append(a.series, record.RefSeries{ a.series = append(a.series, record.RefSeries{
@ -571,7 +566,8 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
} }
} }
if h != nil { switch {
case h != nil:
s.Lock() s.Lock()
if err := s.appendableHistogram(t, h); err != nil { if err := s.appendableHistogram(t, h); err != nil {
s.Unlock() s.Unlock()
@ -588,7 +584,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
H: h, H: h,
}) })
a.histogramSeries = append(a.histogramSeries, s) a.histogramSeries = append(a.histogramSeries, s)
} else if fh != nil { case fh != nil:
s.Lock() s.Lock()
if err := s.appendableFloatHistogram(t, fh); err != nil { if err := s.appendableFloatHistogram(t, fh); err != nil {
s.Unlock() s.Unlock()
@ -945,7 +941,10 @@ func (a *headAppender) Commit() (err error) {
var ok, chunkCreated bool var ok, chunkCreated bool
if err == nil && oooSample { switch {
case err != nil:
// Do nothing here.
case oooSample:
// Sample is OOO and OOO handling is enabled // Sample is OOO and OOO handling is enabled
// and the delta is within the OOO tolerance. // and the delta is within the OOO tolerance.
var mmapRef chunks.ChunkDiskMapperRef var mmapRef chunks.ChunkDiskMapperRef
@ -983,7 +982,7 @@ func (a *headAppender) Commit() (err error) {
// TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305.
samplesAppended-- samplesAppended--
} }
} else if err == nil { default:
ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange) ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange)
if ok { if ok {
if s.T < inOrderMint { if s.T < inOrderMint {
@ -1184,14 +1183,15 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts) app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts)
} }
// We have 3 cases here // We have 3 cases here
// - !okToAppend -> We need to cut a new chunk. // - !okToAppend or counterReset -> We need to cut a new chunk.
// - okToAppend but we have inserts → Existing chunk needs // - okToAppend but we have inserts → Existing chunk needs
// recoding before we can append our histogram. // recoding before we can append our histogram.
// - okToAppend and no inserts → Chunk is ready to support our histogram. // - okToAppend and no inserts → Chunk is ready to support our histogram.
if !okToAppend || counterReset { switch {
case !okToAppend || counterReset:
c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange) c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
chunkCreated = true chunkCreated = true
} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 { case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
// New buckets have appeared. We need to recode all // New buckets have appeared. We need to recode all
// prior histogram samples within the chunk before we // prior histogram samples within the chunk before we
// can process this one. // can process this one.
@ -1277,14 +1277,15 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts) app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts)
} }
// We have 3 cases here // We have 3 cases here
// - !okToAppend -> We need to cut a new chunk. // - !okToAppend or counterReset -> We need to cut a new chunk.
// - okToAppend but we have inserts → Existing chunk needs // - okToAppend but we have inserts → Existing chunk needs
// recoding before we can append our histogram. // recoding before we can append our histogram.
// - okToAppend and no inserts → Chunk is ready to support our histogram. // - okToAppend and no inserts → Chunk is ready to support our histogram.
if !okToAppend || counterReset { switch {
case !okToAppend || counterReset:
c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange) c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
chunkCreated = true chunkCreated = true
} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 { case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
// New buckets have appeared. We need to recode all // New buckets have appeared. We need to recode all
// prior histogram samples within the chunk before we // prior histogram samples within the chunk before we
// can process this one. // can process this one.
@ -1453,8 +1454,7 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap
return 0 return 0
} }
xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality. xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
-	oooXor := &chunkenc.OOOXORChunk{XORChunk: xor}
-	chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, oooXor, handleChunkWriteError)
+	chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, xor, true, handleChunkWriteError)
s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{ s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
ref: chunkRef, ref: chunkRef,
numSamples: uint16(xor.NumSamples()), numSamples: uint16(xor.NumSamples()),
@ -1471,7 +1471,7 @@ func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper
return return
} }
chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk, handleChunkWriteError) chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk, false, handleChunkWriteError)
s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{ s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{
ref: chunkRef, ref: chunkRef,
numSamples: uint16(s.headChunk.chunk.NumSamples()), numSamples: uint16(s.headChunk.chunk.NumSamples()),

View file

@ -274,22 +274,36 @@ func (h *headChunkReader) Close() error {
// Chunk returns the chunk for the reference number. // Chunk returns the chunk for the reference number.
func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
chk, _, err := h.chunk(meta, false)
return chk, err
}
// ChunkWithCopy returns the chunk for the reference number.
// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk.
func (h *headChunkReader) ChunkWithCopy(meta chunks.Meta) (chunkenc.Chunk, int64, error) {
return h.chunk(meta, true)
}
// chunk returns the chunk for the reference number.
// If copyLastChunk is true, then it makes a copy of the head chunk if asked for it.
// Also returns max time of the chunk.
func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
sid, cid := chunks.HeadChunkRef(meta.Ref).Unpack() sid, cid := chunks.HeadChunkRef(meta.Ref).Unpack()
s := h.head.series.getByID(sid) s := h.head.series.getByID(sid)
// This means that the series has been garbage collected. // This means that the series has been garbage collected.
if s == nil { if s == nil {
return nil, storage.ErrNotFound return nil, 0, storage.ErrNotFound
} }
s.Lock() s.Lock()
c, garbageCollect, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool) c, headChunk, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
if err != nil { if err != nil {
s.Unlock() s.Unlock()
return nil, err return nil, 0, err
} }
defer func() { defer func() {
if garbageCollect { if !headChunk {
// Set this to nil so that Go GC can collect it after it has been used. // Set this to nil so that Go GC can collect it after it has been used.
c.chunk = nil c.chunk = nil
h.head.memChunkPool.Put(c) h.head.memChunkPool.Put(c)
@ -299,22 +313,36 @@ func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
// This means that the chunk is outside the specified range. // This means that the chunk is outside the specified range.
if !c.OverlapsClosedInterval(h.mint, h.maxt) { if !c.OverlapsClosedInterval(h.mint, h.maxt) {
s.Unlock() s.Unlock()
return nil, storage.ErrNotFound return nil, 0, storage.ErrNotFound
}
chk, maxTime := c.chunk, c.maxTime
if headChunk && copyLastChunk {
// The caller may ask to copy the head chunk in order to take the
// bytes of the chunk without causing a race between reads and appends.
b := s.headChunk.chunk.Bytes()
newB := make([]byte, len(b))
copy(newB, b) // TODO(codesome): Use bytes.Clone() when we upgrade to Go 1.20.
// TODO(codesome): Put back in the pool (non-trivial).
chk, err = h.head.opts.ChunkPool.Get(s.headChunk.chunk.Encoding(), newB)
if err != nil {
return nil, 0, err
}
} }
s.Unlock() s.Unlock()
return &safeChunk{ return &safeChunk{
Chunk: c.chunk, Chunk: chk,
s: s, s: s,
cid: cid, cid: cid,
isoState: h.isoState, isoState: h.isoState,
}, nil }, maxTime, nil
} }
// chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk. // chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk.
// If garbageCollect is true, it means that the returned *memChunk // If headChunk is false, it means that the returned *memChunk
// (and not the chunkenc.Chunk inside it) can be garbage collected after its usage. // (and not the chunkenc.Chunk inside it) can be garbage collected after its usage.
func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, garbageCollect bool, err error) { func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, headChunk bool, err error) {
// ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are // ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are
// incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index. // incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index.
// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix // The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
@ -323,11 +351,12 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
if ix < 0 || ix > len(s.mmappedChunks) { if ix < 0 || ix > len(s.mmappedChunks) {
return nil, false, storage.ErrNotFound return nil, false, storage.ErrNotFound
} }
if ix == len(s.mmappedChunks) { if ix == len(s.mmappedChunks) {
if s.headChunk == nil { if s.headChunk == nil {
return nil, false, errors.New("invalid head chunk") return nil, false, errors.New("invalid head chunk")
} }
return s.headChunk, false, nil return s.headChunk, true, nil
} }
chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref) chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref)
if err != nil { if err != nil {
@ -340,7 +369,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
mc.chunk = chk mc.chunk = chk
mc.minTime = s.mmappedChunks[ix].minTime mc.minTime = s.mmappedChunks[ix].minTime
mc.maxTime = s.mmappedChunks[ix].maxTime mc.maxTime = s.mmappedChunks[ix].maxTime
return mc, true, nil return mc, false, nil
} }
// oooMergedChunk returns the requested chunk based on the given chunks.Meta // oooMergedChunk returns the requested chunk based on the given chunks.Meta
@ -395,7 +424,8 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
break break
} }
if chunkRef == meta.OOOLastRef { switch {
case chunkRef == meta.OOOLastRef:
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{ meta: chunks.Meta{
MinTime: meta.OOOLastMinTime, MinTime: meta.OOOLastMinTime,
@ -406,7 +436,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
origMinT: c.minTime, origMinT: c.minTime,
origMaxT: c.maxTime, origMaxT: c.maxTime,
}) })
} else if c.OverlapsClosedInterval(mint, maxt) { case c.OverlapsClosedInterval(mint, maxt):
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{ meta: chunks.Meta{
MinTime: c.minTime, MinTime: c.minTime,
@ -565,12 +595,14 @@ type boundedIterator struct {
func (b boundedIterator) Next() chunkenc.ValueType { func (b boundedIterator) Next() chunkenc.ValueType {
for b.Iterator.Next() == chunkenc.ValFloat { for b.Iterator.Next() == chunkenc.ValFloat {
t, _ := b.Iterator.At() t, _ := b.Iterator.At()
-		if t < b.minT {
+		switch {
+		case t < b.minT:
 			continue
-		} else if t > b.maxT {
+		case t > b.maxT:
 			return chunkenc.ValNone
+		default:
+			return chunkenc.ValFloat
 		}
-		return chunkenc.ValFloat
} }
return chunkenc.ValNone return chunkenc.ValNone
} }

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// nolint:revive // Many legitimately empty blocks in this file.
package tsdb package tsdb
import ( import (
@ -40,6 +41,7 @@ import (
"github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/zeropool"
) )
// histogramRecord combines both RefHistogramSample and RefFloatHistogramSample // histogramRecord combines both RefHistogramSample and RefFloatHistogramSample
@ -74,41 +76,14 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
decoded = make(chan interface{}, 10) decoded = make(chan interface{}, 10)
decodeErr, seriesCreationErr error decodeErr, seriesCreationErr error
-		seriesPool = sync.Pool{
-			New: func() interface{} {
-				return []record.RefSeries{}
-			},
-		}
-		samplesPool = sync.Pool{
-			New: func() interface{} {
-				return []record.RefSample{}
-			},
-		}
-		tstonesPool = sync.Pool{
-			New: func() interface{} {
-				return []tombstones.Stone{}
-			},
-		}
-		exemplarsPool = sync.Pool{
-			New: func() interface{} {
-				return []record.RefExemplar{}
-			},
-		}
-		histogramsPool = sync.Pool{
-			New: func() interface{} {
-				return []record.RefHistogramSample{}
-			},
-		}
-		floatHistogramsPool = sync.Pool{
-			New: func() interface{} {
-				return []record.RefFloatHistogramSample{}
-			},
-		}
-		metadataPool = sync.Pool{
-			New: func() interface{} {
-				return []record.RefMetadata{}
-			},
-		}
+		seriesPool          zeropool.Pool[[]record.RefSeries]
+		samplesPool         zeropool.Pool[[]record.RefSample]
+		tstonesPool         zeropool.Pool[[]tombstones.Stone]
+		exemplarsPool       zeropool.Pool[[]record.RefExemplar]
+		histogramsPool      zeropool.Pool[[]record.RefHistogramSample]
+		floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample]
+		metadataPool        zeropool.Pool[[]record.RefMetadata]
) )
defer func() { defer func() {
@ -167,7 +142,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
rec := r.Record() rec := r.Record()
switch dec.Type(rec) { switch dec.Type(rec) {
case record.Series: case record.Series:
series := seriesPool.Get().([]record.RefSeries)[:0] series := seriesPool.Get()[:0]
series, err = dec.Series(rec, series) series, err = dec.Series(rec, series)
if err != nil { if err != nil {
decodeErr = &wlog.CorruptionErr{ decodeErr = &wlog.CorruptionErr{
@ -179,7 +154,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
} }
decoded <- series decoded <- series
case record.Samples: case record.Samples:
samples := samplesPool.Get().([]record.RefSample)[:0] samples := samplesPool.Get()[:0]
samples, err = dec.Samples(rec, samples) samples, err = dec.Samples(rec, samples)
if err != nil { if err != nil {
decodeErr = &wlog.CorruptionErr{ decodeErr = &wlog.CorruptionErr{
@ -191,7 +166,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
} }
decoded <- samples decoded <- samples
case record.Tombstones: case record.Tombstones:
tstones := tstonesPool.Get().([]tombstones.Stone)[:0] tstones := tstonesPool.Get()[:0]
tstones, err = dec.Tombstones(rec, tstones) tstones, err = dec.Tombstones(rec, tstones)
if err != nil { if err != nil {
decodeErr = &wlog.CorruptionErr{ decodeErr = &wlog.CorruptionErr{
@ -203,7 +178,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
} }
decoded <- tstones decoded <- tstones
case record.Exemplars: case record.Exemplars:
exemplars := exemplarsPool.Get().([]record.RefExemplar)[:0] exemplars := exemplarsPool.Get()[:0]
exemplars, err = dec.Exemplars(rec, exemplars) exemplars, err = dec.Exemplars(rec, exemplars)
if err != nil { if err != nil {
decodeErr = &wlog.CorruptionErr{ decodeErr = &wlog.CorruptionErr{
@ -215,7 +190,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
} }
decoded <- exemplars decoded <- exemplars
case record.HistogramSamples: case record.HistogramSamples:
hists := histogramsPool.Get().([]record.RefHistogramSample)[:0] hists := histogramsPool.Get()[:0]
hists, err = dec.HistogramSamples(rec, hists) hists, err = dec.HistogramSamples(rec, hists)
if err != nil { if err != nil {
decodeErr = &wlog.CorruptionErr{ decodeErr = &wlog.CorruptionErr{
@ -227,7 +202,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
} }
decoded <- hists decoded <- hists
case record.FloatHistogramSamples: case record.FloatHistogramSamples:
hists := floatHistogramsPool.Get().([]record.RefFloatHistogramSample)[:0] hists := floatHistogramsPool.Get()[:0]
hists, err = dec.FloatHistogramSamples(rec, hists) hists, err = dec.FloatHistogramSamples(rec, hists)
if err != nil { if err != nil {
decodeErr = &wlog.CorruptionErr{ decodeErr = &wlog.CorruptionErr{
@ -239,7 +214,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
} }
decoded <- hists decoded <- hists
case record.Metadata: case record.Metadata:
meta := metadataPool.Get().([]record.RefMetadata)[:0] meta := metadataPool.Get()[:0]
meta, err := dec.Metadata(rec, meta) meta, err := dec.Metadata(rec, meta)
if err != nil { if err != nil {
decodeErr = &wlog.CorruptionErr{ decodeErr = &wlog.CorruptionErr{
@ -278,7 +253,6 @@ Outer:
idx := uint64(mSeries.ref) % uint64(concurrency) idx := uint64(mSeries.ref) % uint64(concurrency)
processors[idx].input <- walSubsetProcessorInputItem{walSeriesRef: walSeries.Ref, existingSeries: mSeries} processors[idx].input <- walSubsetProcessorInputItem{walSeriesRef: walSeries.Ref, existingSeries: mSeries}
} }
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
seriesPool.Put(v) seriesPool.Put(v)
case []record.RefSample: case []record.RefSample:
samples := v samples := v
@ -315,7 +289,6 @@ Outer:
} }
samples = samples[m:] samples = samples[m:]
} }
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
samplesPool.Put(v) samplesPool.Put(v)
case []tombstones.Stone: case []tombstones.Stone:
for _, s := range v { for _, s := range v {
@ -327,16 +300,14 @@ Outer:
unknownRefs.Inc() unknownRefs.Inc()
continue continue
} }
h.tombstones.AddInterval(storage.SeriesRef(s.Ref), itv) h.tombstones.AddInterval(s.Ref, itv)
} }
} }
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
tstonesPool.Put(v) tstonesPool.Put(v)
case []record.RefExemplar: case []record.RefExemplar:
for _, e := range v { for _, e := range v {
exemplarsInput <- e exemplarsInput <- e
} }
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
exemplarsPool.Put(v) exemplarsPool.Put(v)
case []record.RefHistogramSample: case []record.RefHistogramSample:
samples := v samples := v
@ -373,7 +344,6 @@ Outer:
} }
samples = samples[m:] samples = samples[m:]
} }
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
histogramsPool.Put(v) histogramsPool.Put(v)
case []record.RefFloatHistogramSample: case []record.RefFloatHistogramSample:
samples := v samples := v
@ -410,11 +380,10 @@ Outer:
} }
samples = samples[m:] samples = samples[m:]
} }
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
floatHistogramsPool.Put(v) floatHistogramsPool.Put(v)
case []record.RefMetadata: case []record.RefMetadata:
for _, m := range v { for _, m := range v {
s := h.series.getByID(chunks.HeadSeriesRef(m.Ref)) s := h.series.getByID(m.Ref)
if s == nil { if s == nil {
unknownMetadataRefs.Inc() unknownMetadataRefs.Inc()
continue continue
@ -425,7 +394,6 @@ Outer:
Help: m.Help, Help: m.Help,
} }
} }
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
metadataPool.Put(v) metadataPool.Put(v)
default: default:
panic(fmt.Errorf("unexpected decoded type: %T", d)) panic(fmt.Errorf("unexpected decoded type: %T", d))
@ -793,7 +761,6 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
} }
samples = samples[m:] samples = samples[m:]
} }
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
samplesPool.Put(d) samplesPool.Put(d)
case []record.RefMmapMarker: case []record.RefMmapMarker:
markers := v markers := v

View file

@ -536,7 +536,7 @@ func (w *Writer) finishSymbols() error {
// Write out the length and symbol count. // Write out the length and symbol count.
w.buf1.Reset() w.buf1.Reset()
w.buf1.PutBE32int(int(symbolTableSize)) w.buf1.PutBE32int(int(symbolTableSize))
w.buf1.PutBE32int(int(w.numSymbols)) w.buf1.PutBE32int(w.numSymbols)
if err := w.writeAt(w.buf1.Get(), w.toc.Symbols); err != nil { if err := w.writeAt(w.buf1.Get(), w.toc.Symbols); err != nil {
return err return err
} }

View file

@ -224,7 +224,10 @@ func (p *MemPostings) All() Postings {
// EnsureOrder ensures that all postings lists are sorted. After it returns all further // EnsureOrder ensures that all postings lists are sorted. After it returns all further
// calls to add and addFor will insert new IDs in a sorted manner. // calls to add and addFor will insert new IDs in a sorted manner.
-func (p *MemPostings) EnsureOrder() {
+// Parameter numberOfConcurrentProcesses is used to specify the maximum number of
+// CPU cores used for this operation. If it is <= 0, GOMAXPROCS is used.
+// GOMAXPROCS was the default before introducing this parameter.
+func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) {
p.mtx.Lock() p.mtx.Lock()
defer p.mtx.Unlock() defer p.mtx.Unlock()
@ -232,13 +235,16 @@ func (p *MemPostings) EnsureOrder() {
return return
} }
n := runtime.GOMAXPROCS(0) concurrency := numberOfConcurrentProcesses
if concurrency <= 0 {
concurrency = runtime.GOMAXPROCS(0)
}
workc := make(chan *[][]storage.SeriesRef) workc := make(chan *[][]storage.SeriesRef)
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(n) wg.Add(concurrency)
for i := 0; i < n; i++ { for i := 0; i < concurrency; i++ {
go func() { go func() {
for job := range workc { for job := range workc {
for _, l := range *job { for _, l := range *job {
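
Callers now pick the sorting parallelism explicitly; a non-positive value falls back to GOMAXPROCS, preserving the old behaviour. A hypothetical call site (h.opts mirrors how Head.Init uses it above):

	p := index.NewMemPostings()
	// ... series IDs are added out of order during WAL replay ...
	p.EnsureOrder(h.opts.WALReplayConcurrency) // <= 0 means GOMAXPROCS, the old default
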
@ -559,12 +565,11 @@ func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) {
for _, it := range p { for _, it := range p {
// NOTE: mergedPostings struct requires the user to issue an initial Next. // NOTE: mergedPostings struct requires the user to issue an initial Next.
-		if it.Next() {
+		switch {
+		case it.Next():
 			ph = append(ph, it)
-		} else {
-			if it.Err() != nil {
-				return &mergedPostings{err: it.Err()}, true
-			}
+		case it.Err() != nil:
+			return &mergedPostings{err: it.Err()}, true
 		}
} }
@ -697,17 +702,16 @@ func (rp *removedPostings) Next() bool {
rp.fok = rp.full.Next() rp.fok = rp.full.Next()
return true return true
} }
-	fcur, rcur := rp.full.At(), rp.remove.At()
-	if fcur < rcur {
+	switch fcur, rcur := rp.full.At(), rp.remove.At(); {
+	case fcur < rcur:
 		rp.cur = fcur
 		rp.fok = rp.full.Next()
 		return true
-	} else if rcur < fcur {
+	case rcur < fcur:
 		// Forward the remove postings to the right position.
 		rp.rok = rp.remove.Seek(fcur)
-	} else {
+	default:
 		// Skip the current posting.
 		rp.fok = rp.full.Next()
 	}
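
The three arms above implement the usual subtraction of one sorted postings stream from another. The same logic over plain slices, as an illustrative sketch (subtract is an invented helper):

	// subtract returns the ids in full that are absent from remove.
	// Both slices must be sorted ascending, like Postings iterators.
	func subtract(full, remove []uint64) []uint64 {
		out := make([]uint64, 0, len(full))
		i, j := 0, 0
		for i < len(full) {
			switch {
			case j == len(remove) || full[i] < remove[j]:
				out = append(out, full[i]) // keep: not in the remove list
				i++
			case remove[j] < full[i]:
				j++ // advance remove (the real code uses Seek to jump ahead)
			default:
				i++ // equal: drop this posting
			}
		}
		return out
	}
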
@ -842,9 +846,10 @@ func (it *bigEndianPostings) Err() error {
func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int, err error) { func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int, err error) {
h := make(postingsWithIndexHeap, 0, len(candidates)) h := make(postingsWithIndexHeap, 0, len(candidates))
for idx, it := range candidates { for idx, it := range candidates {
if it.Next() { switch {
case it.Next():
h = append(h, postingsWithIndex{index: idx, p: it}) h = append(h, postingsWithIndex{index: idx, p: it})
} else if it.Err() != nil { case it.Err() != nil:
return nil, it.Err() return nil, it.Err()
} }
} }

View file

@ -31,10 +31,10 @@ type maxHeap struct {
Items []Stat Items []Stat
} }
func (m *maxHeap) init(len int) { func (m *maxHeap) init(length int) {
m.maxLength = len m.maxLength = length
m.minValue = math.MaxUint64 m.minValue = math.MaxUint64
m.Items = make([]Stat, 0, len) m.Items = make([]Stat, 0, length)
} }
func (m *maxHeap) push(item Stat) { func (m *maxHeap) push(item Stat) {

View file

@ -254,7 +254,7 @@ func (txr *txRing) add(appendID uint64) {
if txr.txIDCount == len(txr.txIDs) { if txr.txIDCount == len(txr.txIDs) {
// Ring buffer is full, expand by doubling. // Ring buffer is full, expand by doubling.
newRing := make([]uint64, txr.txIDCount*2) newRing := make([]uint64, txr.txIDCount*2)
idx := copy(newRing[:], txr.txIDs[txr.txIDFirst:]) idx := copy(newRing, txr.txIDs[txr.txIDFirst:])
copy(newRing[idx:], txr.txIDs[:txr.txIDFirst]) copy(newRing[idx:], txr.txIDs[:txr.txIDFirst])
txr.txIDs = newRing txr.txIDs = newRing
txr.txIDFirst = 0 txr.txIDFirst = 0
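
Dropping the redundant [:] on newRing is purely cosmetic; the two copies unroll the ring starting at the oldest entry. Worked through with invented values:

	old := []uint64{30, 40, 10, 20} // full ring, oldest entry at index 2
	txIDFirst := 2
	newRing := make([]uint64, len(old)*2)
	idx := copy(newRing, old[txIDFirst:]) // copies 10, 20
	copy(newRing[idx:], old[:txIDFirst])  // copies 30, 40
	// newRing: [10 20 30 40 0 0 0 0]; txIDFirst resets to 0
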

View file

@ -78,7 +78,7 @@ func (o *OOOChunk) ToXOR() (*chunkenc.XORChunk, error) {
return nil, err return nil, err
} }
for _, s := range o.samples { for _, s := range o.samples {
app.Append(s.t, s.v) app.Append(s.t, s.f)
} }
return x, nil return x, nil
} }
@ -96,7 +96,7 @@ func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk,
if s.t > maxt { if s.t > maxt {
break break
} }
app.Append(s.t, s.v) app.Append(s.t, s.f)
} }
return x, nil return x, nil
} }

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// nolint:revive // Many unused function arguments in this file by design.
package tsdb package tsdb
import ( import (
@ -122,7 +123,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
} }
} }
// There is nothing to do if we did not collect any chunk // There is nothing to do if we did not collect any chunk.
if len(tmpChks) == 0 { if len(tmpChks) == 0 {
return nil return nil
} }
@ -135,14 +136,15 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
// chunks Meta the first chunk that overlaps with others. // chunks Meta the first chunk that overlaps with others.
// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650) // Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650)
// In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to // In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to
// return chunk Metas for chunk 5 and chunk 6. // return chunk Metas for chunk 5 and chunk 6.
*chks = append(*chks, tmpChks[0]) *chks = append(*chks, tmpChks[0])
maxTime := tmpChks[0].MaxTime // tracks the maxTime of the previous "to be merged chunk" maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk".
for _, c := range tmpChks[1:] { for _, c := range tmpChks[1:] {
if c.MinTime > maxTime { switch {
case c.MinTime > maxTime:
*chks = append(*chks, c) *chks = append(*chks, c)
maxTime = c.MaxTime maxTime = c.MaxTime
} else if c.MaxTime > maxTime { case c.MaxTime > maxTime:
maxTime = c.MaxTime maxTime = c.MaxTime
(*chks)[len(*chks)-1].MaxTime = c.MaxTime (*chks)[len(*chks)-1].MaxTime = c.MaxTime
} }
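
This loop collapses overlapping [MinTime, MaxTime] ranges once tmpChks is sorted by MinTime. The same merge on bare intervals, tracing the example from the comment (span and mergeOverlapping are invented names):

	type span struct{ mint, maxt int64 }

	// mergeOverlapping assumes a non-empty input sorted by mint.
	func mergeOverlapping(in []span) []span {
		out := []span{in[0]}
		maxTime := in[0].maxt
		for _, c := range in[1:] {
			switch {
			case c.mint > maxTime: // disjoint: start a new merged span
				out = append(out, c)
				maxTime = c.maxt
			case c.maxt > maxTime: // overlap: extend the previous span
				maxTime = c.maxt
				out[len(out)-1].maxt = c.maxt
			}
		}
		return out
	}

	// mergeOverlapping of (100,200) (150,250) (500,600) (550,650)
	//   -> (100,250) (500,650), i.e. metas for chunk 5 and chunk 6 only.
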
@ -276,16 +278,18 @@ type OOOCompactionHead struct {
// All the above together have a bit of CPU and memory overhead, and can have a bit of impact // All the above together have a bit of CPU and memory overhead, and can have a bit of impact
// on the sample append latency. So call NewOOOCompactionHead only right before compaction. // on the sample append latency. So call NewOOOCompactionHead only right before compaction.
func NewOOOCompactionHead(head *Head) (*OOOCompactionHead, error) { func NewOOOCompactionHead(head *Head) (*OOOCompactionHead, error) {
-	newWBLFile, err := head.wbl.NextSegmentSync()
-	if err != nil {
-		return nil, err
-	}
-
-	ch := &OOOCompactionHead{
-		chunkRange:  head.chunkRange.Load(),
-		mint:        math.MaxInt64,
-		maxt:        math.MinInt64,
-		lastWBLFile: newWBLFile,
-	}
+	ch := &OOOCompactionHead{
+		chunkRange: head.chunkRange.Load(),
+		mint:       math.MaxInt64,
+		maxt:       math.MinInt64,
+	}
+
+	if head.wbl != nil {
+		lastWBLFile, err := head.wbl.NextSegmentSync()
+		if err != nil {
+			return nil, err
+		}
+		ch.lastWBLFile = lastWBLFile
+	}
ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64) ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64)

View file

@ -180,7 +180,7 @@ func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints,
if sortSeries { if sortSeries {
p = q.index.SortedPostings(p) p = q.index.SortedPostings(p)
} }
return newBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
} }
func findSetMatches(pattern string) []string { func findSetMatches(pattern string) []string {
@ -239,18 +239,20 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
} }
for _, m := range ms { for _, m := range ms {
if m.Name == "" && m.Value == "" { // Special-case for AllPostings, used in tests at least. switch {
case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least.
k, v := index.AllPostingsKey() k, v := index.AllPostingsKey()
allPostings, err := ix.Postings(k, v) allPostings, err := ix.Postings(k, v)
if err != nil { if err != nil {
return nil, err return nil, err
} }
its = append(its, allPostings) its = append(its, allPostings)
} else if labelMustBeSet[m.Name] { case labelMustBeSet[m.Name]:
// If this matcher must be non-empty, we can be smarter. // If this matcher must be non-empty, we can be smarter.
matchesEmpty := m.Matches("") matchesEmpty := m.Matches("")
isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp
if isNot && matchesEmpty { // l!="foo" switch {
case isNot && matchesEmpty: // l!="foo"
// If the label can't be empty and is a Not and the inner matcher // If the label can't be empty and is a Not and the inner matcher
// doesn't match empty, then subtract it out at the end. // doesn't match empty, then subtract it out at the end.
inverse, err := m.Inverse() inverse, err := m.Inverse()
@ -263,7 +265,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
return nil, err return nil, err
} }
notIts = append(notIts, it) notIts = append(notIts, it)
} else if isNot && !matchesEmpty { // l!="" case isNot && !matchesEmpty: // l!=""
// If the label can't be empty and is a Not, but the inner matcher can // If the label can't be empty and is a Not, but the inner matcher can
// be empty we need to use inversePostingsForMatcher. // be empty we need to use inversePostingsForMatcher.
inverse, err := m.Inverse() inverse, err := m.Inverse()
@ -279,7 +281,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
return index.EmptyPostings(), nil return index.EmptyPostings(), nil
} }
its = append(its, it) its = append(its, it)
} else { // l="a" default: // l="a"
// Non-Not matcher, use normal postingsForMatcher. // Non-Not matcher, use normal postingsForMatcher.
it, err := postingsForMatcher(ix, m) it, err := postingsForMatcher(ix, m)
if err != nil { if err != nil {
@ -290,7 +292,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
} }
its = append(its, it) its = append(its, it)
} }
} else { // l="" default: // l=""
// If the matchers for a labelname selects an empty value, it selects all // If the matchers for a labelname selects an empty value, it selects all
// the series which don't have the label name set too. See: // the series which don't have the label name set too. See:
// https://github.com/prometheus/prometheus/issues/3575 and // https://github.com/prometheus/prometheus/issues/3575 and
@ -438,7 +440,7 @@ func (s *seriesData) Labels() labels.Labels { return s.labels }
// blockBaseSeriesSet allows to iterate over all series in the single block. // blockBaseSeriesSet allows to iterate over all series in the single block.
// Iterated series are trimmed with given min and max time as well as tombstones. // Iterated series are trimmed with given min and max time as well as tombstones.
// See newBlockSeriesSet and newBlockChunkSeriesSet to use it for either sample or chunk iterating. // See newBlockSeriesSet and NewBlockChunkSeriesSet to use it for either sample or chunk iterating.
type blockBaseSeriesSet struct { type blockBaseSeriesSet struct {
blockID ulid.ULID blockID ulid.ULID
p index.Postings p index.Postings
@ -584,7 +586,11 @@ func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr Chunk
p.currChkMeta = chunks.Meta{} p.currChkMeta = chunks.Meta{}
} }
-func (p *populateWithDelGenericSeriesIterator) next() bool {
+// If copyHeadChunk is true, then the head chunk (i.e. the in-memory chunk of the TSDB)
+// is deep copied to avoid races between reads and copying chunk bytes.
+// However, if the deletion intervals overlap with the head chunk, then the head chunk is
+// not copied irrespective of copyHeadChunk because it will be re-encoded later anyway.
+func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool {
if p.err != nil || p.i >= len(p.chks)-1 { if p.err != nil || p.i >= len(p.chks)-1 {
return false return false
} }
@ -592,12 +598,6 @@ func (p *populateWithDelGenericSeriesIterator) next() bool {
p.i++ p.i++
p.currChkMeta = p.chks[p.i] p.currChkMeta = p.chks[p.i]
p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta)
if p.err != nil {
p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String())
return false
}
p.bufIter.Intervals = p.bufIter.Intervals[:0] p.bufIter.Intervals = p.bufIter.Intervals[:0]
for _, interval := range p.intervals { for _, interval := range p.intervals {
if p.currChkMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) { if p.currChkMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) {
@ -605,22 +605,28 @@ func (p *populateWithDelGenericSeriesIterator) next() bool {
} }
} }
-	// Re-encode head chunks that are still open (being appended to) or
-	// outside the compacted MaxTime range.
-	// The chunk.Bytes() method is not safe for open chunks hence the re-encoding.
-	// This happens when snapshotting the head block or just fetching chunks from TSDB.
-	//
-	// TODO(codesome): think how to avoid the typecasting to verify when it is head block.
-	_, isSafeChunk := p.currChkMeta.Chunk.(*safeChunk)
-	if len(p.bufIter.Intervals) == 0 && !(isSafeChunk && p.currChkMeta.MaxTime == math.MaxInt64) {
-		// If there is no overlap with deletion intervals AND it's NOT
-		// an "open" head chunk, we can take chunk as it is.
-		p.currDelIter = nil
-		return true
-	}
-
-	// We don't want the full chunk, or it's potentially still opened, take
-	// just a part of it.
+	hcr, ok := p.chunks.(*headChunkReader)
+	if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 {
+		// ChunkWithCopy will copy the head chunk.
+		var maxt int64
+		p.currChkMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currChkMeta)
+		// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
+		p.currChkMeta.MaxTime = maxt
+	} else {
+		p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta)
+	}
+	if p.err != nil {
+		p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String())
+		return false
+	}
+
+	if len(p.bufIter.Intervals) == 0 {
+		// If there is no overlap with deletion intervals, we can take chunk as it is.
+		p.currDelIter = nil
+		return true
+	}
+
+	// We don't want the full chunk, take just a part of it.
 	p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(p.bufIter.Iter)
 	p.currDelIter = &p.bufIter
 	return true
@ -677,7 +683,7 @@ func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType {
} }
} }
for p.next() { for p.next(false) {
if p.currDelIter != nil { if p.currDelIter != nil {
p.curr = p.currDelIter p.curr = p.currDelIter
} else { } else {
@ -742,7 +748,7 @@ func (p *populateWithDelChunkSeriesIterator) reset(blockID ulid.ULID, cr ChunkRe
} }
func (p *populateWithDelChunkSeriesIterator) Next() bool { func (p *populateWithDelChunkSeriesIterator) Next() bool {
if !p.next() { if !p.next(true) {
return false return false
} }
p.curr = p.currChkMeta p.curr = p.currChkMeta
@ -920,7 +926,7 @@ type blockChunkSeriesSet struct {
blockBaseSeriesSet blockBaseSeriesSet
} }
func newBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet { func NewBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet {
return &blockChunkSeriesSet{ return &blockChunkSeriesSet{
blockBaseSeriesSet{ blockBaseSeriesSet{
blockID: id, blockID: id,
@ -954,39 +960,45 @@ type mergedStringIter struct {
b index.StringIter b index.StringIter
aok, bok bool aok, bok bool
cur string cur string
err error
} }
func (m *mergedStringIter) Next() bool { func (m *mergedStringIter) Next() bool {
if (!m.aok && !m.bok) || (m.Err() != nil) { if (!m.aok && !m.bok) || (m.Err() != nil) {
return false return false
} }
-	if !m.aok {
-		m.cur = m.b.At()
-		m.bok = m.b.Next()
-	} else if !m.bok {
-		m.cur = m.a.At()
-		m.aok = m.a.Next()
-	} else if m.b.At() > m.a.At() {
-		m.cur = m.a.At()
-		m.aok = m.a.Next()
-	} else if m.a.At() > m.b.At() {
-		m.cur = m.b.At()
-		m.bok = m.b.Next()
-	} else { // Equal.
-		m.cur = m.b.At()
-		m.aok = m.a.Next()
-		m.bok = m.b.Next()
-	}
+	switch {
+	case !m.aok:
+		m.cur = m.b.At()
+		m.bok = m.b.Next()
+		m.err = m.b.Err()
+	case !m.bok:
+		m.cur = m.a.At()
+		m.aok = m.a.Next()
+		m.err = m.a.Err()
+	case m.b.At() > m.a.At():
+		m.cur = m.a.At()
+		m.aok = m.a.Next()
+		m.err = m.a.Err()
+	case m.a.At() > m.b.At():
+		m.cur = m.b.At()
+		m.bok = m.b.Next()
+		m.err = m.b.Err()
+	default: // Equal.
+		m.cur = m.b.At()
+		m.aok = m.a.Next()
+		m.err = m.a.Err()
+		m.bok = m.b.Next()
+		if m.err == nil {
+			m.err = m.b.Err()
+		}
+	}
return true return true
} }
func (m mergedStringIter) At() string { return m.cur } func (m mergedStringIter) At() string { return m.cur }
func (m mergedStringIter) Err() error { func (m mergedStringIter) Err() error {
if m.a.Err() != nil { return m.err
return m.a.Err()
}
return m.b.Err()
} }
// DeletedIterator wraps chunk Iterator and makes sure any deleted metrics are not returned. // DeletedIterator wraps chunk Iterator and makes sure any deleted metrics are not returned.
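mergedStringIter now records the error of whichever sub-iterator it consumed inside Next(), so Err() becomes a plain field read that keeps reporting the failure even after both sides are exhausted, instead of re-querying m.a and m.b on every call. The same error-caching iterator pattern in isolation (lineIter and its use of bufio.Scanner are illustrative, not part of this diff):

package main

import (
	"bufio"
	"io"
)

// lineIter caches the first error observed in Next, so Err stays valid
// (and cheap) no matter when it is called.
type lineIter struct {
	sc  *bufio.Scanner
	err error
}

func newLineIter(r io.Reader) *lineIter { return &lineIter{sc: bufio.NewScanner(r)} }

func (it *lineIter) Next() bool {
	if it.err != nil {
		return false
	}
	if !it.sc.Scan() {
		it.err = it.sc.Err() // cache once; nil means clean EOF
		return false
	}
	return true
}

func (it *lineIter) At() string { return it.sc.Text() }
func (it *lineIter) Err() error { return it.err }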
@@ -1075,7 +1087,7 @@ func newNopChunkReader() ChunkReader {
 	}
 }
 
-func (cr nopChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
+func (cr nopChunkReader) Chunk(chunks.Meta) (chunkenc.Chunk, error) {
 	return cr.emptyChunk, nil
 }

View file

@@ -190,9 +190,10 @@ type Stone struct {
 
 func ReadTombstones(dir string) (Reader, int64, error) {
 	b, err := os.ReadFile(filepath.Join(dir, TombstonesFilename))
-	if os.IsNotExist(err) {
+	switch {
+	case os.IsNotExist(err):
 		return NewMemTombstones(), 0, nil
-	} else if err != nil {
+	case err != nil:
 		return nil, 0, err
 	}
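This is the first of several identical refactors in this commit (openSegmentFile, walReader.entry, LiveReader.Next and Watcher.watch below follow the same shape): an if/else-if chain over mutually exclusive conditions becomes an expressionless switch, optionally carrying an init statement, which Go evaluates top to bottom with the first true case winning. A standalone illustration of the equivalence:

package main

func classifyIf(n int) string {
	if n < 0 {
		return "negative"
	} else if n == 0 {
		return "zero"
	}
	return "positive"
}

// Same behavior, flatter layout; the `switch v, err := f(); {` form used in
// the Read(...) refactors below additionally scopes the init variables
// to the switch, just like an if with an init statement.
func classifySwitch(n int) string {
	switch {
	case n < 0:
		return "negative"
	case n == 0:
		return "zero"
	default:
		return "positive"
	}
}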

View file

@@ -28,7 +28,7 @@ type Samples interface {
 type Sample interface {
 	T() int64
-	V() float64
+	F() float64
 	H() *histogram.Histogram
 	FH() *histogram.FloatHistogram
 	Type() chunkenc.ValueType
@@ -69,7 +69,7 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
 	for i := 0; i < s.Len(); i++ {
 		switch sampleType {
 		case chunkenc.ValFloat:
-			ca.Append(s.Get(i).T(), s.Get(i).V())
+			ca.Append(s.Get(i).T(), s.Get(i).F())
 		case chunkenc.ValHistogram:
 			ca.AppendHistogram(s.Get(i).T(), s.Get(i).H())
 		case chunkenc.ValFloatHistogram:
@@ -87,7 +87,7 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
 type sample struct {
 	t  int64
-	v  float64
+	f  float64
 	h  *histogram.Histogram
 	fh *histogram.FloatHistogram
 }
@@ -96,8 +96,8 @@ func (s sample) T() int64 {
 	return s.t
 }
 
-func (s sample) V() float64 {
-	return s.v
+func (s sample) F() float64 {
+	return s.f
 }
 
 func (s sample) H() *histogram.Histogram {
@@ -123,7 +123,7 @@ func (s sample) Type() chunkenc.ValueType {
 func PopulatedChunk(numSamples int, minTime int64) chunks.Meta {
 	samples := make([]Sample, numSamples)
 	for i := 0; i < numSamples; i++ {
-		samples[i] = sample{t: minTime + int64(i*1000), v: 1.0}
+		samples[i] = sample{t: minTime + int64(i*1000), f: 1.0}
 	}
 	return ChunkFromSamples(samples)
 }
@@ -133,7 +133,7 @@ func GenerateSamples(start, numSamples int) []Sample {
 	return generateSamples(start, numSamples, func(i int) Sample {
 		return sample{
 			t: int64(i),
-			v: float64(i),
+			f: float64(i),
 		}
 	})
 }
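The Sample accessor rename V() → F() (and field v → f) is part of the broader v0.44 convention of calling float samples F across the TSDB API, so downstream code that implements or calls this interface must change in lockstep with the vendor bump. A hedged sketch of the corresponding one-line downstream fix (mySample is a hypothetical implementation, and the real interface also requires H, FH and Type, omitted here for brevity):

package main

type mySample struct {
	ts  int64
	val float64
}

func (s mySample) T() int64 { return s.ts }

// Before v0.44 this accessor was named V(); only the name changes,
// the value and its semantics stay the same.
func (s mySample) F() float64 { return s.val }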

View file

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many unsued function arguments in this file by design.
 package tsdb
 
 import (
@@ -38,6 +39,7 @@ import (
 	"github.com/prometheus/prometheus/tsdb/record"
 	"github.com/prometheus/prometheus/tsdb/tombstones"
 	"github.com/prometheus/prometheus/tsdb/wlog"
+	"github.com/prometheus/prometheus/util/zeropool"
 )
 
 // WALEntryType indicates what data a WAL entry contains.
@@ -89,7 +91,7 @@ func newWalMetrics(r prometheus.Registerer) *walMetrics {
 // WAL is a write ahead log that can log new series labels and samples.
 // It must be completely read before new entries are logged.
 //
-// DEPRECATED: use wlog pkg combined with the record codex instead.
+// Deprecated: use wlog pkg combined with the record codex instead.
 type WAL interface {
 	Reader() WALReader
 	LogSeries([]record.RefSeries) error
@@ -146,7 +148,7 @@ func newCRC32() hash.Hash32 {
 // SegmentWAL is a write ahead log for series data.
 //
-// DEPRECATED: use wlog pkg combined with the record coders instead.
+// Deprecated: use wlog pkg combined with the record coders instead.
 type SegmentWAL struct {
 	mtx     sync.Mutex
 	metrics *walMetrics
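The DEPRECATED → Deprecated edits are more than cosmetic: Go tooling (go doc, gopls, staticcheck's SA1019) only recognizes a deprecation notice when a comment paragraph begins with exactly "Deprecated:". For example:

package main

// Deprecated: Use NewThing instead. This exact prefix is what godoc,
// gopls and staticcheck key on; the former all-caps "DEPRECATED:"
// spelling was invisible to them.
func OldThing() {}

func NewThing() {}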
@@ -520,9 +522,10 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
 		}
 	}()
 
-	if n, err := f.Read(metab); err != nil {
+	switch n, err := f.Read(metab); {
+	case err != nil:
 		return nil, errors.Wrapf(err, "validate meta %q", f.Name())
-	} else if n != 8 {
+	case n != 8:
 		return nil, errors.Errorf("invalid header size %d in %q", n, f.Name())
 	}
@@ -870,9 +873,9 @@ func (r *walReader) Read(
 	// Historically, the processing is the bottleneck with reading and decoding using only
 	// 15% of the CPU.
 	var (
-		seriesPool sync.Pool
-		samplePool sync.Pool
-		deletePool sync.Pool
+		seriesPool zeropool.Pool[[]record.RefSeries]
+		samplePool zeropool.Pool[[]record.RefSample]
+		deletePool zeropool.Pool[[]tombstones.Stone]
 	)
 	donec := make(chan struct{})
 	datac := make(chan interface{}, 100)
@@ -886,19 +889,16 @@ func (r *walReader) Read(
 			if seriesf != nil {
 				seriesf(v)
 			}
-			//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
 			seriesPool.Put(v[:0])
 		case []record.RefSample:
 			if samplesf != nil {
 				samplesf(v)
 			}
-			//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
 			samplePool.Put(v[:0])
 		case []tombstones.Stone:
 			if deletesf != nil {
 				deletesf(v)
 			}
-			//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
 			deletePool.Put(v[:0])
 		default:
 			level.Error(r.logger).Log("msg", "unexpected data type")
@@ -915,11 +915,9 @@ func (r *walReader) Read(
 		// Those should generally be caught by entry decoding before.
 		switch et {
 		case WALEntrySeries:
-			var series []record.RefSeries
-			if v := seriesPool.Get(); v == nil {
+			series := seriesPool.Get()
+			if series == nil {
 				series = make([]record.RefSeries, 0, 512)
-			} else {
-				series = v.([]record.RefSeries)
 			}
 
 			err = r.decodeSeries(flag, b, &series)
@@ -936,11 +934,9 @@ func (r *walReader) Read(
 				}
 			}
 		case WALEntrySamples:
-			var samples []record.RefSample
-			if v := samplePool.Get(); v == nil {
+			samples := samplePool.Get()
+			if samples == nil {
 				samples = make([]record.RefSample, 0, 512)
-			} else {
-				samples = v.([]record.RefSample)
 			}
 
 			err = r.decodeSamples(flag, b, &samples)
@@ -958,11 +954,9 @@ func (r *walReader) Read(
 				}
 			}
 		case WALEntryDeletes:
-			var deletes []tombstones.Stone
-			if v := deletePool.Get(); v == nil {
+			deletes := deletePool.Get()
+			if deletes == nil {
 				deletes = make([]tombstones.Stone, 0, 512)
-			} else {
-				deletes = v.([]tombstones.Stone)
 			}
 
 			err = r.decodeDeletes(flag, b, &deletes)
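Two details make the zeropool conversion above work. First, the pools are used as zero values (no New constructor), and a zero-value zeropool.Pool[T].Get returns the zero value of T when the pool is empty, which for a slice type is nil, so the `== nil` allocate-on-miss branch behaves exactly like the old `sync.Pool.Get() == nil` check. Second, the typed Put takes a T rather than interface{}, so putting a slice no longer boxes it into an interface, which is what the deleted SA6002 nolint comments were excusing. The calling pattern in isolation (bufPool and the helper names are illustrative):

package main

import "github.com/prometheus/prometheus/util/zeropool"

var bufPool zeropool.Pool[[]byte] // zero value is valid

func getBuf() []byte {
	b := bufPool.Get()
	if b == nil { // an empty zero-value pool yields the zero value of []byte
		b = make([]byte, 0, 512)
	}
	return b
}

func putBuf(b []byte) {
	bufPool.Put(b[:0]) // reset length, keep capacity for reuse
}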
@@ -1070,9 +1064,10 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
 	tr := io.TeeReader(cr, r.crc32)
 
 	b := make([]byte, 6)
-	if n, err := tr.Read(b); err != nil {
+	switch n, err := tr.Read(b); {
+	case err != nil:
 		return 0, 0, nil, err
-	} else if n != 6 {
+	case n != 6:
 		return 0, 0, nil, r.corruptionErr("invalid entry header size %d", n)
 	}
@@ -1094,15 +1089,17 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
 	}
 
 	buf := r.buf[:length]
-	if n, err := tr.Read(buf); err != nil {
+	switch n, err := tr.Read(buf); {
+	case err != nil:
 		return 0, 0, nil, err
-	} else if n != length {
+	case n != length:
 		return 0, 0, nil, r.corruptionErr("invalid entry body size %d", n)
 	}
 
-	if n, err := cr.Read(b[:4]); err != nil {
+	switch n, err := cr.Read(b[:4]); {
+	case err != nil:
 		return 0, 0, nil, err
-	} else if n != 4 {
+	case n != 4:
 		return 0, 0, nil, r.corruptionErr("invalid checksum length %d", n)
 	}
 
 	if exp, has := binary.BigEndian.Uint32(b[:4]), r.crc32.Sum32(); has != exp {

View file

@@ -126,9 +126,10 @@ func (r *LiveReader) Next() bool {
 		// we return EOF and the user can try again later. If we have a full
 		// page, buildRecord is guaranteed to return a record or a non-EOF; it
 		// has checks the records fit in pages.
-		if ok, err := r.buildRecord(); ok {
+		switch ok, err := r.buildRecord(); {
+		case ok:
 			return true
-		} else if err != nil && err != io.EOF {
+		case err != nil && err != io.EOF:
 			r.err = err
 			return false
 		}

View file

@@ -405,9 +405,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
 			// Ignore errors reading to end of segment whilst replaying the WAL.
 			if !tail {
-				if err != nil && errors.Cause(err) != io.EOF {
+				switch {
+				case err != nil && errors.Cause(err) != io.EOF:
 					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
-				} else if reader.Offset() != size {
+				case reader.Offset() != size:
 					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
 				}
 				return nil
@@ -425,9 +426,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
 			// Ignore all errors reading to end of segment whilst replaying the WAL.
 			if !tail {
-				if err != nil && errors.Cause(err) != io.EOF {
+				switch {
+				case err != nil && errors.Cause(err) != io.EOF:
 					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
-				} else if reader.Offset() != size {
+				case reader.Offset() != size:
 					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
 				}
 				return nil

View file

@@ -37,6 +37,6 @@ func (c *MockContext) Err() error {
 }
 
 // Value ignores the Value and always returns nil
-func (c *MockContext) Value(key interface{}) interface{} {
+func (c *MockContext) Value(interface{}) interface{} {
 	return nil
 }

View file

@@ -22,7 +22,7 @@ type roundTrip struct {
 	theError    error
 }
 
-func (rt *roundTrip) RoundTrip(r *http.Request) (*http.Response, error) {
+func (rt *roundTrip) RoundTrip(*http.Request) (*http.Response, error) {
 	return rt.theResponse, rt.theError
 }
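This hunk and the MockContext.Value one address the same revive unused-parameter warning as the file-level nolint in wal.go: when a method must match an interface but ignores an argument, Go permits leaving the parameter unnamed, which both documents the intent and silences the linter. For instance (constTripper is illustrative):

package main

import "net/http"

// constTripper satisfies http.RoundTripper while ignoring the request,
// so the *http.Request parameter is left unnamed.
type constTripper struct{ resp *http.Response }

func (t constTripper) RoundTrip(*http.Request) (*http.Response, error) {
	return t.resp, nil
}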

View file

@@ -0,0 +1,77 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package zeropool provides a zero-allocation type-safe alternative for sync.Pool, used to workaround staticcheck SA6002.
// The contents of this package are brought from https://github.com/colega/zeropool because "little copying is better than little dependency".
package zeropool

import "sync"

// Pool is a type-safe pool of items that does not allocate pointers to items.
// That is not entirely true, it does allocate sometimes, but not most of the time,
// just like the usual sync.Pool pools items most of the time, except when they're evicted.
// It does that by storing the allocated pointers in a secondary pool instead of letting them go,
// so they can be used later to store the items again.
//
// Zero value of Pool[T] is valid, and it will return zero values of T if nothing is pooled.
type Pool[T any] struct {
	// items holds pointers to the pooled items, which are valid to be used.
	items sync.Pool
	// pointers holds just pointers to the pooled item types.
	// The values referenced by pointers are not valid to be used (as they're used by some other caller)
	// and it is safe to overwrite these pointers.
	pointers sync.Pool
}

// New creates a new Pool[T] with the given function to create new items.
// A Pool must not be copied after first use.
func New[T any](item func() T) Pool[T] {
	return Pool[T]{
		items: sync.Pool{
			New: func() interface{} {
				val := item()
				return &val
			},
		},
	}
}

// Get returns an item from the pool, creating a new one if necessary.
// Get may be called concurrently from multiple goroutines.
func (p *Pool[T]) Get() T {
	pooled := p.items.Get()
	if pooled == nil {
		// The only way this can happen is when someone is using the zero-value of zeropool.Pool, and items pool is empty.
		// We don't have a pointer to store in p.pointers, so just return the empty value.
		var zero T
		return zero
	}

	ptr := pooled.(*T)
	item := *ptr // ptr still holds a reference to a copy of item, but nobody will use it.
	p.pointers.Put(ptr)

	return item
}

// Put adds an item to the pool.
func (p *Pool[T]) Put(item T) {
	var ptr *T
	if pooled := p.pointers.Get(); pooled != nil {
		ptr = pooled.(*T)
	} else {
		ptr = new(T)
	}
	*ptr = item
	p.items.Put(ptr)
}
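The wal.go call sites earlier in this commit rely on the zero-value form of this pool; with the New constructor, Get never returns the zero value because an empty pool falls back to the item function. A short usage sketch following directly from the code above:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/util/zeropool"
)

func main() {
	// New seeds the pool with a constructor, so Get always returns a
	// usable slice even when nothing has been Put yet.
	pool := zeropool.New(func() []int { return make([]int, 0, 128) })

	s := pool.Get()
	s = append(s, 1, 2, 3)
	fmt.Println(len(s), cap(s)) // 3 128

	pool.Put(s[:0]) // return with length reset; the capacity is reused
}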

View file

@@ -5,4 +5,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "0.122.0"
+const Version = "0.123.0"

13
vendor/modules.txt vendored
View file

@@ -4,7 +4,7 @@ cloud.google.com/go/internal
 cloud.google.com/go/internal/optional
 cloud.google.com/go/internal/trace
 cloud.google.com/go/internal/version
-# cloud.google.com/go/compute v1.19.2
+# cloud.google.com/go/compute v1.19.3
 ## explicit; go 1.19
 cloud.google.com/go/compute/internal
 # cloud.google.com/go/compute/metadata v0.2.3
@@ -83,7 +83,7 @@ github.com/VividCortex/ewma
 # github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
 ## explicit; go 1.15
 github.com/alecthomas/units
-# github.com/aws/aws-sdk-go v1.44.260
+# github.com/aws/aws-sdk-go v1.44.265
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/awserr
@@ -426,8 +426,8 @@ github.com/prometheus/common/sigv4
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
-# github.com/prometheus/prometheus v0.43.1
-## explicit; go 1.18
+# github.com/prometheus/prometheus v0.44.0
+## explicit; go 1.19
 github.com/prometheus/prometheus/config
 github.com/prometheus/prometheus/discovery
 github.com/prometheus/prometheus/discovery/targetgroup
@@ -461,6 +461,7 @@ github.com/prometheus/prometheus/util/logging
 github.com/prometheus/prometheus/util/osutil
 github.com/prometheus/prometheus/util/pool
 github.com/prometheus/prometheus/util/testutil
+github.com/prometheus/prometheus/util/zeropool
 # github.com/rivo/uniseg v0.4.4
 ## explicit; go 1.18
 github.com/rivo/uniseg
@@ -560,7 +561,7 @@ golang.org/x/crypto/cryptobyte/asn1
 golang.org/x/crypto/hkdf
 golang.org/x/crypto/internal/alias
 golang.org/x/crypto/internal/poly1305
-# golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53
+# golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc
 ## explicit; go 1.20
 golang.org/x/exp/constraints
 golang.org/x/exp/slices
@@ -608,7 +609,7 @@ golang.org/x/time/rate
 ## explicit; go 1.17
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
-# google.golang.org/api v0.122.0
+# google.golang.org/api v0.123.0
 ## explicit; go 1.19
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport