From 534944b671929ea5cd788d82833804aeb9b2f0cc Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Tue, 9 Mar 2021 12:01:55 +0200 Subject: [PATCH 01/59] vendor: `make vendor-update` --- go.mod | 8 +- go.sum | 16 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 85 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/s3/api.go | 818 +++--- .../aws/aws-sdk-go/service/s3/errors.go | 6 +- .../service/s3/s3manager/upload_input.go | 22 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 1 + .../golang.org/x/sys/windows/exec_windows.go | 8 +- vendor/golang.org/x/sys/windows/mkerrors.bash | 7 + .../x/sys/windows/security_windows.go | 13 + .../x/sys/windows/syscall_windows.go | 177 +- .../golang.org/x/sys/windows/types_windows.go | 503 +++- .../x/sys/windows/zerrors_windows.go | 2570 +++++++++++++++++ .../x/sys/windows/zsyscall_windows.go | 247 +- vendor/modules.txt | 8 +- 16 files changed, 4025 insertions(+), 466 deletions(-) diff --git a/go.mod b/go.mod index 2d751a9e4..dd0ac52ac 100644 --- a/go.mod +++ b/go.mod @@ -10,14 +10,14 @@ require ( github.com/VictoriaMetrics/fasthttp v1.0.13 github.com/VictoriaMetrics/metrics v1.15.2 github.com/VictoriaMetrics/metricsql v0.14.0 - github.com/aws/aws-sdk-go v1.37.22 + github.com/aws/aws-sdk-go v1.37.26 github.com/cespare/xxhash/v2 v2.1.1 github.com/cheggaaa/pb/v3 v3.0.6 github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/fatih/color v1.10.0 // indirect github.com/golang/snappy v0.0.3 github.com/influxdata/influxdb v1.8.4 - github.com/klauspost/compress v1.11.9 + github.com/klauspost/compress v1.11.12 github.com/mattn/go-runewidth v0.0.10 // indirect github.com/prometheus/client_golang v1.9.0 // indirect github.com/prometheus/common v0.18.0 // indirect @@ -36,9 +36,9 @@ require ( golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20210301091718-77cc2087c03b + golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 google.golang.org/api v0.40.0 - google.golang.org/genproto v0.0.0-20210302174412-5ede27ff9881 // indirect + google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb // indirect google.golang.org/grpc v1.36.0 // indirect gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index 78d9a7aa1..7c821a6c5 100644 --- a/go.sum +++ b/go.sum @@ -123,8 +123,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.37.22 h1:cyZp8TvUbH9rrShdrwULtCj4pB5szddrw9aKHUsw1Ic= -github.com/aws/aws-sdk-go v1.37.22/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.37.26 h1:D9Qvyjlr6xFR0CspZ0imdASc5Y1WE/Sgyte4l+cUp44= +github.com/aws/aws-sdk-go v1.37.26/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -508,8 +508,8 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.9 h1:5OCMOdde1TCT2sookEuVeEZzA8bmRSFV3AwPDZAG8AA= -github.com/klauspost/compress v1.11.9/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -1005,8 +1005,8 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210301091718-77cc2087c03b h1:kHlr0tATeLRMEiZJu5CknOw/E8V6h69sXXQFGoPtjcc= -golang.org/x/sys v0.0.0-20210301091718-77cc2087c03b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 h1:46ULzRKLh1CwgRq2dC5SlBzEqqNCi8rreOZnNrbqcIY= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1174,8 +1174,8 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210302174412-5ede27ff9881 h1:SYuy3hIRsBIROE0aZwsJZOEJNC/n9/p0FmLEU9C31AE= -google.golang.org/genproto v0.0.0-20210302174412-5ede27ff9881/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb h1:hcskBH5qZCOa7WpTUFUFvoebnSFZBYpjykLtjIp9DVk= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 0c6fcdb01..12cfff430 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -21,6 +21,7 @@ const ( ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). @@ -121,6 +122,9 @@ var awsPartition = partition{ "ap-northeast-2": region{ Description: "Asia Pacific (Seoul)", }, + "ap-northeast-3": region{ + Description: "Asia Pacific (Osaka)", + }, "ap-south-1": region{ Description: "Asia Pacific (Mumbai)", }, @@ -184,6 +188,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -239,6 +244,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -452,6 +458,12 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-northeast-3": endpoint{ + Hostname: "api.ecr.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "ap-south-1": endpoint{ Hostname: "api.ecr.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -706,6 +718,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -765,6 +778,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -915,6 +929,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1120,6 +1135,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1385,6 +1401,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1708,6 +1725,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1905,6 +1923,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1966,6 +1985,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + 
"ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2143,6 +2163,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2205,6 +2226,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2262,6 +2284,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2328,6 +2351,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2379,6 +2403,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2428,6 +2453,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2459,6 +2485,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2508,6 +2535,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2542,6 +2570,12 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "fips-ap-northeast-3": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "fips-ap-south-1": endpoint{ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2655,6 +2689,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2707,6 +2742,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2786,9 +2822,19 @@ var awsPartition = partition{ "emr-containers": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "entitlement.marketplace": service{ @@ -2808,6 +2854,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2839,6 +2886,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2888,6 +2936,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3184,6 +3233,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3239,6 +3289,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3699,6 +3750,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3792,6 +3844,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3865,6 +3918,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3981,6 +4035,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4280,6 +4335,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4354,6 +4410,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4976,6 +5033,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5033,6 +5091,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5136,6 +5195,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": 
endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5338,6 +5398,7 @@ var awsPartition = partition{ SignatureVersions: []string{"s3", "s3v4"}, }, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{ Hostname: "s3.ap-southeast-1.amazonaws.com", @@ -5414,6 +5475,13 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-northeast-3": endpoint{ + Hostname: "s3-control.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "ap-south-1": endpoint{ Hostname: "s3-control.ap-south-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, @@ -5609,6 +5677,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5850,6 +5919,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5960,6 +6030,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6082,6 +6153,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6134,6 +6206,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6185,6 +6258,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6240,6 +6314,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6385,6 +6460,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6452,6 +6528,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6501,6 +6578,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -7010,6 +7088,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": 
endpoint{}, "ap-southeast-2": endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 508ca6ae8..e31f79a3a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.37.22" +const SDKVersion = "1.37.26" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index cc1f3dbf5..8182416c3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -67,7 +67,7 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // AbortMultipartUpload API operation for Amazon Simple Storage Service. // -// This operation aborts a multipart upload. After a multipart upload is aborted, +// This action aborts a multipart upload. After a multipart upload is aborted, // no additional parts can be uploaded using that upload ID. The storage consumed // by any previously uploaded parts will be freed. However, if any part uploads // are currently in progress, those part uploads might or might not succeed. @@ -76,10 +76,10 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // // To verify that all parts have been removed, so you don't get charged for // the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// operation and ensure that the parts list is empty. +// action and ensure that the parts list is empty. // -// For information about permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// For information about permissions required to use the multipart upload, see +// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // The following operations are related to AbortMultipartUpload: // @@ -175,10 +175,10 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // You first initiate the multipart upload and then upload all parts using the // UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // operation. After successfully uploading all relevant parts of an upload, -// you call this operation to complete the upload. Upon receiving this request, +// you call this action to complete the upload. Upon receiving this request, // Amazon S3 concatenates all the parts in ascending order by part number to // create a new object. In the Complete Multipart Upload request, you must provide -// the parts list. You must ensure that the parts list is complete. This operation +// the parts list. You must ensure that the parts list is complete. This action // concatenates the parts that you provide in the list. For each part in the // list, you must provide the part number and the ETag value, returned after // that part was uploaded. @@ -199,7 +199,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). 
// // For information about permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // CompleteMultipartUpload has the following special errors: // @@ -306,10 +306,10 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // Creates a copy of an object that is already stored in Amazon S3. // // You can store individual objects of up to 5 TB in Amazon S3. You create a -// copy of your object up to 5 GB in size in a single atomic operation using -// this API. However, to copy an object greater than 5 GB, you must use the -// multipart upload Upload Part - Copy API. For more information, see Copy Object -// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// copy of your object up to 5 GB in size in a single atomic action using this +// API. However, to copy an object greater than 5 GB, you must use the multipart +// upload Upload Part - Copy API. For more information, see Copy Object Using +// the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). // // All copy requests must be authenticated. Additionally, you must have read // access to the source object and write access to the destination bucket. For @@ -319,7 +319,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // A copy request might return an error when Amazon S3 receives the copy request // or while Amazon S3 is copying the files. If the error occurs before the copy -// operation starts, you receive a standard Amazon S3 error. If the error occurs +// action starts, you receive a standard Amazon S3 error. If the error occurs // during the copy operation, the error response is embedded in the 200 OK response. // This means that a 200 OK response can contain either a success or an error. // Design your application to parse the contents of the response and handle @@ -334,7 +334,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // The copy request charge is based on the storage class and Region that you // specify for the destination object. For pricing information, see Amazon S3 -// pricing (https://aws.amazon.com/s3/pricing/). +// pricing (http://aws.amazon.com/s3/pricing/). // // Amazon S3 transfer acceleration does not support cross-Region copies. If // you request a cross-Region copy using a transfer acceleration endpoint, you @@ -404,7 +404,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the // object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Access Control List (ACL)-Specific Request Headers // @@ -418,7 +418,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // Storage Class Options // -// You can use the CopyObject operation to change the storage class of an object +// You can use the CopyObject action to change the storage class of an object // that is already stored in Amazon S3 using the StorageClass parameter. 
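The CompleteMultipartUpload rewording above does not change the call shape; a minimal sketch of completing an upload with a full parts list follows. The upload ID and ETags are placeholders (each ETag comes from the corresponding UploadPart response).

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// The parts list must be complete and in ascending part-number order,
	// as the docs above require. Values below are placeholders.
	_, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("example-object"),
		UploadId: aws.String("example-upload-id"),
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{ETag: aws.String("\"etag-1\""), PartNumber: aws.Int64(1)},
				{ETag: aws.String("\"etag-2\""), PartNumber: aws.Int64(2)},
			},
		},
	})
	if err != nil {
		log.Fatalf("complete failed: %v", err)
	}
}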
For // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 Service Developer Guide. @@ -459,8 +459,8 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // Returned Error Codes: // * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" -// The source object of the COPY operation is not in the active tier and is -// only stored in Amazon S3 Glacier. +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { @@ -678,10 +678,10 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // CreateMultipartUpload API operation for Amazon Simple Storage Service. // -// This operation initiates a multipart upload and returns an upload ID. This -// upload ID is used to associate all of the parts in the specific multipart -// upload. You specify this upload ID in each of your subsequent upload part -// requests (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). +// This action initiates a multipart upload and returns an upload ID. This upload +// ID is used to associate all of the parts in the specific multipart upload. +// You specify this upload ID in each of your subsequent upload part requests +// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). // You also include this upload ID in the final request to either complete or // abort the multipart upload request. // @@ -691,12 +691,12 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // If you have configured a lifecycle rule to abort incomplete multipart uploads, // the upload must complete within the number of days specified in the bucket // lifecycle configuration. Otherwise, the incomplete multipart upload becomes -// eligible for an abort operation and Amazon S3 aborts the multipart upload. -// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// eligible for an abort action and Amazon S3 aborts the multipart upload. For +// more information, see Aborting Incomplete Multipart Uploads Using a Bucket // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). // // For information about the permissions required to use the multipart upload -// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // For request signing, multipart upload is just a series of regular requests. // You initiate a multipart upload, send one or more requests to upload parts, @@ -716,7 +716,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // and decrypts it when you access it. You can provide your own encryption key, // or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or // Amazon S3-managed encryption keys. 
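To ground the CopyObject notes above (single-call copies up to 5 GB, and changing storage class in place via the StorageClass parameter), a hedged sketch; bucket and object names are made up.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// CopySource is "source-bucket/source-key" (URL-encoded). StorageClass
	// demonstrates the copy-in-place pattern the docs mention for changing
	// an object's storage class.
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:       aws.String("dest-bucket"),
		Key:          aws.String("dest-object"),
		CopySource:   aws.String("source-bucket/source-object"),
		StorageClass: aws.String(s3.StorageClassStandardIa),
	})
	if err != nil {
		// Errors embedded in a 200 OK body (noted above) are expected to
		// surface through this error path as well.
		log.Fatalf("copy failed: %v", err)
	}
}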
If you choose to provide your own encryption -// key, the request headers you provide in UploadPart (AmazonS3/latest/API/API_UploadPart.html) +// key, the request headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // requests must match the headers you used in the request to initiate the upload // by using CreateMultipartUpload. @@ -1083,7 +1083,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // permission to others. // // For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Related Resources: // @@ -1164,17 +1164,17 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // DeleteBucketEncryption API operation for Amazon Simple Storage Service. // -// This implementation of the DELETE operation removes default encryption from +// This implementation of the DELETE action removes default encryption from // the bucket. For information about the Amazon S3 default encryption feature, // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Related Resources // @@ -1725,9 +1725,9 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // DeleteBucketPolicy API operation for Amazon Simple Storage Service. // -// This implementation of the DELETE operation uses the policy subresource to -// delete the policy of a specified bucket. If you are using an identity other -// than the root user of the AWS account that owns the bucket, the calling identity +// This implementation of the DELETE action uses the policy subresource to delete +// the policy of a specified bucket. If you are using an identity other than +// the root user of the AWS account that owns the bucket, the calling identity // must have the DeleteBucketPolicy permissions on the specified bucket and // belong to the bucket owner's account to use this operation. // @@ -2000,15 +2000,15 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // DeleteBucketWebsite API operation for Amazon Simple Storage Service. // -// This operation removes the website configuration for a bucket. Amazon S3 -// returns a 200 OK response upon successfully deleting a website configuration -// on the specified bucket. You will get a 200 OK response if the website configuration +// This action removes the website configuration for a bucket. 
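As a reference point for the CreateMultipartUpload description above, a minimal sketch of initiating an upload and capturing the upload ID that all later UploadPart/Complete/Abort calls must carry. Names are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// Requesting SSE-KMS at initiation, per the encryption notes above.
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("example-object"),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
	})
	if err != nil {
		log.Fatalf("initiate failed: %v", err)
	}
	fmt.Println("upload ID:", aws.StringValue(out.UploadId))
}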
Amazon S3 returns +// a 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration // you are trying to delete does not exist on the bucket. Amazon S3 returns // a 404 response if the bucket specified in the request does not exist. // -// This DELETE operation requires the S3:DeleteBucketWebsite permission. By -// default, only the bucket owner can delete the website configuration attached -// to a bucket. However, bucket owners can grant other users permission to delete +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a +// bucket. However, bucket owners can grant other users permission to delete // the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite // permission. // @@ -2110,14 +2110,14 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). // To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). // -// You can delete objects by explicitly calling the DELETE Object API or configure -// its lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// You can delete objects by explicitly calling DELETE Object or configure its +// lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) // to enable Amazon S3 to remove them for you. If you want to block users or // accounts from removing or deleting objects from your bucket, you must deny // them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration // actions. // -// The following operation is related to DeleteObject: +// The following action is related to DeleteObject: // // * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // @@ -2285,27 +2285,27 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // DeleteObjects API operation for Amazon Simple Storage Service. // -// This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. If you know the object keys that you want to delete, -// then this operation provides a suitable alternative to sending individual -// delete requests, reducing per-request overhead. +// This action enables you to delete multiple objects from a bucket using a +// single HTTP request. If you know the object keys that you want to delete, +// then this action provides a suitable alternative to sending individual delete +// requests, reducing per-request overhead. // // The request contains a list of up to 1000 keys that you want to delete. In // the XML, you provide the object key names, and optionally, version IDs if // you want to delete a specific version of the object from a versioning-enabled -// bucket. For each key, Amazon S3 performs a delete operation and returns the +// bucket. For each key, Amazon S3 performs a delete action and returns the // result of that delete, success, or failure, in the response. Note that if // the object specified in the request is not found, Amazon S3 returns the result // as deleted. 
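A small sketch of the single-object DeleteObject call discussed above. On a versioning-enabled bucket, omitting VersionId inserts a delete marker rather than removing data; the names below are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// No VersionId: on a versioned bucket this creates a delete marker.
	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-object"),
	})
	if err != nil {
		log.Fatalf("delete failed: %v", err)
	}
	fmt.Println("delete marker created:", aws.BoolValue(out.DeleteMarker))
}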
// -// The operation supports two modes for the response: verbose and quiet. By -// default, the operation uses verbose mode in which the response includes the -// result of deletion of each key in your request. In quiet mode the response -// includes only keys where the delete operation encountered an error. For a -// successful deletion, the operation does not return any information about -// the delete in the response body. +// The action supports two modes for the response: verbose and quiet. By default, +// the action uses verbose mode in which the response includes the result of +// deletion of each key in your request. In quiet mode the response includes +// only keys where the delete action encountered an error. For a successful +// deletion, the action does not return any information about the delete in +// the response body. // -// When performing this operation on an MFA Delete enabled bucket, that attempts +// When performing this action on an MFA Delete enabled bucket, that attempts // to delete any versioned objects, you must include an MFA token. If you do // not provide one, the entire request will fail, even if there are non-versioned // objects you are trying to delete. If you provide an invalid token, whether @@ -2489,8 +2489,8 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation uses the accelerate subresource -// to return the Transfer Acceleration state of a bucket, which is either Enabled +// This implementation of the GET action uses the accelerate subresource to +// return the Transfer Acceleration state of a bucket, which is either Enabled // or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that // enables you to perform faster data transfers to and from Amazon S3. // @@ -2499,7 +2499,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // can grant this permission to others. For more information about permissions, // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // You set the Transfer Acceleration state of an existing bucket to Enabled // or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) @@ -2511,7 +2511,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // // For more information about transfer acceleration, see Transfer Acceleration // (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Related Resources // @@ -2589,7 +2589,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // GetBucketAcl API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation uses the acl subresource to return +// This implementation of the GET action uses the acl subresource to return // the access control list (ACL) of a bucket. 
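The quiet/verbose distinction described above maps to the Quiet flag on the Delete element; a sketch of a quiet-mode batch delete with placeholder keys follows.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// Quiet mode, per the docs above: the response lists only keys whose
	// delete failed, so a fully successful run reports zero errors.
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("logs/a.txt")},
				{Key: aws.String("logs/b.txt")},
			},
			Quiet: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatalf("batch delete failed: %v", err)
	}
	fmt.Println("failed keys:", len(out.Errors))
}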
To use GET to return the ACL of // the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission // is granted to the anonymous user, you can return the ACL of the bucket without @@ -2671,7 +2671,7 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation returns an analytics configuration +// This implementation of the GET action returns an analytics configuration // (identified by the analytics configuration ID) from the bucket. // // To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration @@ -2679,11 +2679,11 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // can grant this permission to others. For more information about permissions, // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // For information about Amazon S3 analytics feature, see Amazon S3 Analytics // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Related Resources // @@ -3250,8 +3250,8 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // Accordingly, this section describes the latest API. The response describes // the new filter element that you can use to specify a filter to select a subset // of objects to which the rule applies. If you are using a previous version -// of the lifecycle configuration, it still works. For the earlier API description, -// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// of the lifecycle configuration, it still works. For the earlier action, see +// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). // // Returns the lifecycle configuration information set on the bucket. For information // about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). @@ -3692,8 +3692,8 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // Returns the notification configuration of a bucket. // -// If notifications are not enabled on the bucket, the operation returns an -// empty NotificationConfiguration element. +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. // // By default, you must be the bucket owner to read the notification configuration // of a bucket. However, the bucket owner can use a bucket policy to grant permission @@ -3704,7 +3704,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). 
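To illustrate the "empty NotificationConfiguration" behavior noted above, a sketch of reading a bucket's notification configuration; note this SDK's historical input type name, GetBucketNotificationConfigurationRequest. The bucket name is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	cfg, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		log.Fatalf("get notification config failed: %v", err)
	}
	// With no notifications enabled, all three slices come back empty,
	// matching the empty-element behavior the docs above describe.
	fmt.Println(len(cfg.TopicConfigurations), len(cfg.QueueConfigurations), len(cfg.LambdaFunctionConfigurations))
}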
// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // -// The following operation is related to GetBucketNotification: +// The following action is related to GetBucketNotification: // // * PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) // @@ -3882,7 +3882,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // For more information about bucket policies, see Using Bucket Policies and // User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // -// The following operation is related to GetBucketPolicy: +// The following action is related to GetBucketPolicy: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -4055,11 +4055,11 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // can return a wrong result. // // For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // -// This operation requires permissions for the s3:GetReplicationConfiguration -// action. For more information about permissions, see Using Bucket Policies -// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// This action requires permissions for the s3:GetReplicationConfiguration action. +// For more information about permissions, see Using Bucket Policies and User +// Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // // If you include the Filter element in a replication configuration, you must // also include the DeleteMarkerReplication and Priority elements. The response @@ -4408,7 +4408,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // For more information about hosting websites, see Hosting Websites on Amazon // S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). // -// This GET operation requires the S3:GetBucketWebsite permission. By default, +// This GET action requires the S3:GetBucketWebsite permission. By default, // only the bucket owner can read the bucket website configuration. However, // bucket owners can allow other users to read the website configuration by // writing a bucket policy granting them the S3:GetBucketWebsite permission. @@ -4518,7 +4518,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering // Deep Archive tiers, before you can retrieve the object you must first restore // a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). -// Otherwise, this operation returns an InvalidObjectStateError error. For information +// Otherwise, this action returns an InvalidObjectStateError error. For information // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). // // Encryption request headers, like x-amz-server-side-encryption, should not @@ -4561,8 +4561,8 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // // Versioning // -// By default, the GET operation returns the current version of an object. 
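For the bucket policy retrieval discussed above, a minimal sketch (bucket name is a placeholder; the caller must satisfy the GetBucketPolicy permission rules described in the docs).

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		log.Fatalf("get policy failed: %v", err)
	}
	fmt.Println(aws.StringValue(out.Policy)) // the policy document as JSON text
}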
To -// return a different version, use the versionId subresource. +// By default, the GET action returns the current version of an object. To return +// a different version, use the versionId subresource. // // If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the @@ -5029,7 +5029,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // subresource associated with the object. // // To use this operation, you must have permission to perform the s3:GetObjectTagging -// action. By default, the GET operation returns information about current version +// action. By default, the GET action returns information about current version // of an object. For a versioned bucket, you can have multiple versions of an // object in your bucket. To retrieve tags of any other version, use the versionId // query parameter. You also need permission for the s3:GetObjectVersionTagging @@ -5041,7 +5041,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // For information about the Amazon S3 object tagging feature, see Object Tagging // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). // -// The following operation is related to GetObjectTagging: +// The following action is related to GetObjectTagging: // // * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // @@ -5131,7 +5131,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // // This action is not supported by Amazon S3 on Outposts. // -// The following operation is related to GetObjectTorrent: +// The following action is related to GetObjectTorrent: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -5305,9 +5305,9 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // HeadBucket API operation for Amazon Simple Storage Service. // -// This operation is useful to determine if a bucket exists and you have permission -// to access it. The operation returns a 200 OK if the bucket exists and you -// have permission to access it. +// This action is useful to determine if a bucket exists and you have permission +// to access it. The action returns a 200 OK if the bucket exists and you have +// permission to access it. // // If the bucket does not exist or you do not have permission to access it, // the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A @@ -5397,15 +5397,15 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // HeadObject API operation for Amazon Simple Storage Service. // -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. +// The HEAD action retrieves metadata from an object without returning the object +// itself. This action is useful if you're only interested in an object's metadata. +// To use HEAD, you must have READ access to the object. // -// A HEAD request has the same options as a GET operation on an object. The -// response is identical to the GET response except that there is no response -// body. Because of this, if the HEAD request generates an error, it returns -// a generic 404 Not Found or 403 Forbidden code. 
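The versionId subresource mentioned above corresponds to the VersionId field on GetObjectInput; a sketch of fetching and reading a specific object version follows. Bucket, key, and version ID are placeholders.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// VersionId is optional; when omitted, the current version is returned,
	// matching the versioning note in the docs above.
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:    aws.String("example-bucket"),
		Key:       aws.String("example-object"),
		VersionId: aws.String("example-version-id"), // placeholder
	})
	if err != nil {
		log.Fatalf("get failed: %v", err)
	}
	defer out.Body.Close()
	body, err := ioutil.ReadAll(out.Body)
	if err != nil {
		log.Fatalf("read failed: %v", err)
	}
	fmt.Printf("read %d bytes\n", len(body))
}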
It is not possible to retrieve -// the exact exception beyond these error codes. +// A HEAD request has the same options as a GET action on an object. The response +// is identical to the GET response except that there is no response body. Because +// of this, if the HEAD request generates an error, it returns a generic 404 +// Not Found or 403 Forbidden code. It is not possible to retrieve the exact +// exception beyond these error codes. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when @@ -5459,7 +5459,7 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // * If you don’t have the s3:ListBucket permission, Amazon S3 returns // an HTTP status code 403 ("access denied") error. // -// The following operation is related to HeadObject: +// The following action is related to HeadObject: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -5541,13 +5541,13 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // Lists the analytics configurations for the bucket. You can have up to 1,000 // analytics configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. You should always check the IsTruncated element -// in the response. If there are no more configurations to list, IsTruncated -// is set to false. If there are more configurations to list, IsTruncated is -// set to true, and there will be a value in NextContinuationToken. You use -// the NextContinuationToken value to continue the pagination of the list by -// passing the value in continuation-token in the request to GET the next page. +// This action supports list pagination and does not return more than 100 configurations +// at a time. You should always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there will be a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner @@ -5740,12 +5740,12 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // Returns a list of inventory configurations for the bucket. You can have up // to 1,000 analytics configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there is a value in NextContinuationToken. You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. 
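Because HEAD errors carry only generic status codes (as the docs above stress), callers typically branch on the HTTP status; a sketch using this SDK's awserr.RequestFailure interface, with placeholder names.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-object"),
	})
	if err != nil {
		// Only a generic 404 Not Found / 403 Forbidden is available here;
		// there is no detailed error body to inspect.
		if rf, ok := err.(awserr.RequestFailure); ok {
			log.Fatalf("HEAD failed with HTTP %d", rf.StatusCode())
		}
		log.Fatal(err)
	}
	fmt.Println(aws.Int64Value(out.ContentLength), aws.StringValue(out.ContentType))
}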
If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token // in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration @@ -5841,12 +5841,12 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // are only for the request metrics of the bucket and do not provide information // on daily storage metrics. You can have up to 1,000 configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there is a value in NextContinuationToken. You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token // in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration @@ -6018,11 +6018,11 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // ListMultipartUploads API operation for Amazon Simple Storage Service. // -// This operation lists in-progress multipart uploads. An in-progress multipart +// This action lists in-progress multipart uploads. An in-progress multipart // upload is a multipart upload that has been initiated using the Initiate Multipart // Upload request, but has not yet been completed or aborted. // -// This operation returns at most 1,000 multipart uploads in the response. 1,000 +// This action returns at most 1,000 multipart uploads in the response. 1,000 // multipart uploads is the maximum number of uploads a response can include, // which is also the default value. You can further limit the number of uploads // in a response by specifying the max-uploads parameter in the response. If @@ -6039,7 +6039,7 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information on permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // The following operations are related to ListMultipartUploads: // @@ -6340,8 +6340,8 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // to design your application to parse the contents of the response and handle // it appropriately. // -// This API has been revised. 
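The ListMultipartUploads pagination described above (max-uploads plus key/upload-id markers) is wrapped by this SDK's paginator; a sketch with placeholder names follows.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// The paginator follows the marker protocol the docs describe,
	// fetching up to MaxUploads in-progress uploads per page.
	err := svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
		Bucket:     aws.String("example-bucket"),
		MaxUploads: aws.Int64(100),
	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		return true // keep paging
	})
	if err != nil {
		log.Fatalf("list uploads failed: %v", err)
	}
}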
We recommend that you use the newer version, ListObjectsV2 -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), +// This action has been revised. We recommend that you use the newer version, +// ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), // when developing applications. For backward compatibility, Amazon S3 continues // to support ListObjects. // @@ -6501,14 +6501,14 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // // To use this operation, you must have READ access to the bucket. // -// To use this operation in an AWS Identity and Access Management (IAM) policy, +// To use this action in an AWS Identity and Access Management (IAM) policy, // you must have permissions to perform the s3:ListBucket action. The bucket // owner has this permission by default and can grant this permission to others. // For more information about permissions, see Permissions Related to Bucket // Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // -// This section describes the latest revision of the API. We recommend that +// This section describes the latest revision of this action. We recommend that // you use this revised API for application development. For backward compatibility, // Amazon S3 continues to support the prior version of this API, ListObjects // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). @@ -6673,7 +6673,7 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information on permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // The following operations are related to ListParts: // @@ -6830,7 +6830,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // * Suspended – Disables accelerated data transfers to the bucket. // // The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// operation returns the transfer acceleration state of a bucket. +// action returns the transfer acceleration state of a bucket. // // After setting the Transfer Acceleration state of a bucket to Enabled, it // might take up to thirty minutes before the data transfer rates to the bucket @@ -7242,7 +7242,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // // For more information about CORS, go to Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// Simple Storage Service Developer Guide. +// Simple Storage Service User Guide. // // Related Resources // @@ -7329,7 +7329,7 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // PutBucketEncryption API operation for Amazon Simple Storage Service. 
// -// This operation uses the encryption subresource to configure default encryption +// This action uses the encryption subresource to configure default encryption // and Amazon S3 Bucket Key for an existing bucket. // // Default encryption for a bucket can use server-side encryption with Amazon @@ -7337,19 +7337,19 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // specify default encryption using SSE-KMS, you can also configure Amazon S3 // Bucket Key. For information about default encryption, see Amazon S3 default // bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. For more information -// about S3 Bucket Keys, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. For more information about +// S3 Bucket Keys, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service User Guide. // -// This operation requires AWS Signature Version 4. For more information, see -// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). +// This action requires AWS Signature Version 4. For more information, see Authenticating +// Requests (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). // // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Related Resources // @@ -7547,9 +7547,9 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the PUT operation adds an inventory configuration -// (identified by the inventory ID) to the bucket. You can have up to 1,000 -// inventory configurations per bucket. +// This implementation of the PUT action adds an inventory configuration (identified +// by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations +// per bucket. // // Amazon S3 inventory generates inventories of the objects in the bucket on // a daily or weekly basis, and the results are published to a flat file. The @@ -7562,7 +7562,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // the inventory daily or weekly. You can also configure what object metadata // to include and whether to inventory all object versions or only current versions. // For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. 
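As an illustration of the PUT inventory call these comments describe, the sketch below configures a daily CSV inventory of current object versions. It assumes a client set up as in the earlier pagination sketch; the bucket names and destination ARN are placeholders, and the destination bucket still needs the bucket policy discussed next:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))
        _, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
            Bucket: aws.String("example-source-bucket"), // hypothetical
            Id:     aws.String("example-inventory"),
            InventoryConfiguration: &s3.InventoryConfiguration{
                Id:                     aws.String("example-inventory"),
                IsEnabled:              aws.Bool(true),
                IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
                Schedule: &s3.InventorySchedule{
                    Frequency: aws.String(s3.InventoryFrequencyDaily),
                },
                Destination: &s3.InventoryDestination{
                    S3BucketDestination: &s3.InventoryS3BucketDestination{
                        // The destination is addressed by ARN and must grant
                        // Amazon S3 permission to write the inventory files.
                        Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
                        Format: aws.String(s3.InventoryFormatCsv),
                    },
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }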
// // You must create a bucket policy on the destination bucket to grant permissions // to Amazon S3 to write objects to the bucket in the defined location. For @@ -7574,7 +7574,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // permission to others. For more information about permissions, see Permissions // Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Special Errors // @@ -7686,7 +7686,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // Creates a new lifecycle configuration for the bucket or replaces an existing // lifecycle configuration. For information about lifecycle configuration, see // Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // By default, all Amazon S3 resources, including buckets, objects, and related // subresources (for example, lifecycle configuration and website configuration) @@ -7708,7 +7708,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // // For more information about permissions, see Managing Access Permissions to // your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // For more examples of transitioning objects to storage classes such as STANDARD_IA // or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). @@ -7725,7 +7725,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // the AWS account that created the bucket—can perform any of the operations. // A resource owner can also grant others permission to perform the operation. // For more information, see the following topics in the Amazon Simple Storage -// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// Service User Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) // Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -8277,8 +8277,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // // -// This operation replaces the existing notification configuration with the -// configuration you include in the request body. +// This action replaces the existing notification configuration with the configuration +// you include in the request body. 
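A minimal sketch of replacing a bucket's notification configuration through the vendored SDK. The bucket name and SQS queue ARN are placeholders, and as the comments note, Amazon S3 verifies the destination (sending a test message) before accepting the configuration:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))
        _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
            Bucket: aws.String("example-bucket"), // hypothetical
            NotificationConfiguration: &s3.NotificationConfiguration{
                QueueConfigurations: []*s3.QueueConfiguration{{
                    // Placeholder ARN; the queue policy must allow Amazon S3
                    // to send messages for the request to succeed.
                    QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
                    Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }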
// // After Amazon S3 receives this request, it first verifies that any Amazon // Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon @@ -8298,8 +8298,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // The PUT notification is an atomic operation. For example, suppose your notification // configuration includes SNS topic, SQS queue, and Lambda function configurations. // When you send a PUT request with this configuration, Amazon S3 sends test -// messages to your SNS topic. If the message fails, the entire PUT operation -// will fail, and Amazon S3 will not add the configuration to your bucket. +// messages to your SNS topic. If the message fails, the entire PUT action will +// fail, and Amazon S3 will not add the configuration to your bucket. // // Responses // @@ -8308,7 +8308,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // will also include the x-amz-sns-test-message-id header containing the message // ID of the test notification sent to the topic. // -// The following operation is related to PutBucketNotificationConfiguration: +// The following action is related to PutBucketNotificationConfiguration: // // * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // @@ -8584,8 +8584,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // -// To perform this operation, the user or role performing the operation must -// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// To perform this operation, the user or role performing the action must have +// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) // permission. // // Specify the replication configuration in the request body. In the replication @@ -8833,7 +8833,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // match the schema. // // * Error code: OperationAbortedError Description: A conflicting conditional -// operation is currently in progress against this resource. Please try again. +// action is currently in progress against this resource. Please try again. // // * Error code: InternalError Description: The service was unable to apply // the provided tag to the bucket. @@ -9040,7 +9040,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // document and any redirect rules. For more information, see Hosting Websites // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). // -// This PUT operation requires the S3:PutBucketWebsite permission. By default, +// This PUT action requires the S3:PutBucketWebsite permission. By default, // only the bucket owner can configure the website attached to a bucket; however, // bucket owners can allow other users to set the website configuration by writing // a bucket policy that grants them the S3:PutBucketWebsite permission. @@ -9099,7 +9099,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // Amazon S3 has a limitation of 50 routing rules per website configuration. // If you require more than 50 routing rules, you can use object redirect. 
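For the website configuration described here, a sketch of the corresponding PUT with an index and error document. The names are placeholders, and the caller needs the S3:PutBucketWebsite permission discussed above:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))
        _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
            Bucket: aws.String("example-bucket"), // hypothetical
            WebsiteConfiguration: &s3.WebsiteConfiguration{
                IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
                ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }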
For // more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9206,7 +9206,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // If you request server-side encryption using AWS Key Management Service (SSE-KMS), // you can enable an S3 Bucket Key at the object-level. For more information, // see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Access Control List (ACL)-Specific Request Headers // @@ -9325,7 +9325,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // for a new or existing object in an S3 bucket. You must have WRITE_ACP permission // to set the ACL of an object. For more information, see What permissions can // I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // This action is not supported by Amazon S3 on Outposts. // @@ -9773,7 +9773,7 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // * Code: MalformedXMLError Cause: The XML provided does not match the schema. // -// * Code: OperationAbortedError Cause: A conflicting conditional operation +// * Code: OperationAbortedError Cause: A conflicting conditional action // is currently in progress against this resource. Please try again. // // * Code: InternalError Cause: The service was unable to apply the provided @@ -9974,7 +9974,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // permission to others. For more information about permissions, see Permissions // Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Querying Archives with Select Requests // @@ -9984,7 +9984,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // queries and custom analytics on your archived data without having to restore // your data to a hotter Amazon S3 tier. For an overview about select requests, // see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // When making a select request, do the following: // @@ -9995,13 +9995,13 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // the storage class and encryption for the output objects stored in the // bucket. 
For more information about output, see Querying Archived Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon Simple Storage Service Developer Guide. For more information +// in the Amazon Simple Storage Service User Guide. For more information // about the S3 structure in the request body, see the following: PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) Managing // Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon Simple Storage Service Developer Guide Protecting Data Using +// in the Amazon Simple Storage Service User Guide Protecting Data Using // Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon Simple Storage Service Developer Guide +// in the Amazon Simple Storage Service User Guide // // * Define the SQL expression for the SELECT type of restoration for your // query in the request body's SelectParameters structure. You can use expressions @@ -10017,7 +10017,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // For more information about using SQL with S3 Glacier Select restore, see // SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // When making a select request, you can also do the following: // @@ -10088,19 +10088,19 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // For more information about archive retrieval options and provisioned capacity // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // You can use Amazon S3 restore speed upgrade to change the restore speed to // a faster speed while it is in progress. For more information, see Upgrading // the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // To get the status of object restoration, you can send a HEAD request. Operations // return the x-amz-restore header, which provides information about the restoration // status, in the response. You can use Amazon S3 event notifications to notify // you when a restore is initiated or completed. For more information, see Configuring // Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // After restoring an archived object, you can update the restoration period // by reissuing the request with a new period. Amazon S3 updates the restoration @@ -10115,11 +10115,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // the object in 3 days. 
For more information about lifecycle configuration, // see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in Amazon Simple Storage Service Developer Guide. +// in Amazon Simple Storage Service User Guide. // // Responses // -// A successful operation returns either the 200 OK or 202 Accepted status code. +// A successful action returns either the 200 OK or 202 Accepted status code. // // * If the object is not previously restored, then Amazon S3 returns 202 // Accepted in the response. @@ -10146,7 +10146,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // // * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide +// in the Amazon Simple Storage Service User Guide // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10157,7 +10157,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Returned Error Codes: // * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier. +// This action is not allowed against this storage tier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { @@ -10234,7 +10234,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // SelectObjectContent API operation for Amazon Simple Storage Service. // -// This operation filters the contents of an Amazon S3 object based on a simple +// This action filters the contents of an Amazon S3 object based on a simple // structured query language (SQL) statement. In the request, along with the // SQL expression, you must also specify a data serialization format (JSON, // CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse @@ -10246,18 +10246,18 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // For more information about Amazon S3 Select, see Selecting Content from Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // For more information about using SQL with Amazon S3 Select, see SQL Reference // for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Permissions // // You must have s3:GetObject permission for this operation. Amazon S3 Select // does not support anonymous access. For more information about permissions, // see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. 
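A sketch of a SelectObjectContent call over a CSV object, with placeholder bucket and key names. Results come back as an event stream whose RecordsEvent frames carry the selected rows:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))
        out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
            Bucket:         aws.String("example-bucket"), // hypothetical
            Key:            aws.String("example.csv"),    // hypothetical
            Expression:     aws.String("SELECT s.name FROM S3Object s"),
            ExpressionType: aws.String(s3.ExpressionTypeSql),
            InputSerialization: &s3.InputSerialization{
                CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
            },
            OutputSerialization: &s3.OutputSerialization{
                CSV: &s3.CSVOutput{},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        defer out.EventStream.Close()
        // Records arrive incrementally; each RecordsEvent payload holds rows.
        for ev := range out.EventStream.Events() {
            if rec, ok := ev.(*s3.RecordsEvent); ok {
                fmt.Print(string(rec.Payload))
            }
        }
        if err := out.EventStream.Err(); err != nil {
            log.Fatal(err)
        }
    }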
// // Object Data Formats // @@ -10280,13 +10280,13 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon Simple Storage Service Developer Guide. For objects that -// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer +// in the Amazon Simple Storage Service User Guide. For objects that are +// encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer // master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side // encryption is handled transparently, so you don't need to specify anything. // For more information about server-side encryption, including SSE-S3 and // SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Working with the Response Body // @@ -10297,8 +10297,8 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // GetObject Support // -// The SelectObjectContent operation does not support the following GetObject -// functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// The SelectObjectContent action does not support the following GetObject functionality. +// For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). // // * Range: Although you can specify a scan range for an Amazon S3 Select // request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) @@ -10308,7 +10308,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot // specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. // For more information, about storage classes see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Special Errors // @@ -10601,11 +10601,11 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // For more information on multipart uploads, go to Multipart Upload Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the -// Amazon Simple Storage Service Developer Guide . +// Amazon Simple Storage Service User Guide . // // For information on the permissions required to use the multipart upload API, -// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon Simple Storage Service Developer Guide. +// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service User Guide. 
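The upload-part flow these comments describe sits between CreateMultipartUpload and CompleteMultipartUpload. A minimal end-to-end sketch with placeholder names and a single 5 MB part (the minimum size for any part other than the last):

    package main

    import (
        "bytes"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))
        bucket, key := aws.String("example-bucket"), aws.String("example-object") // hypothetical

        // Initiate the upload; the returned upload ID ties the parts together.
        create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
            Bucket: bucket, Key: key,
        })
        if err != nil {
            log.Fatal(err)
        }

        part, err := svc.UploadPart(&s3.UploadPartInput{
            Bucket:     bucket,
            Key:        key,
            UploadId:   create.UploadId,
            PartNumber: aws.Int64(1),
            Body:       bytes.NewReader(make([]byte, 5*1024*1024)),
        })
        if err != nil {
            log.Fatal(err)
        }

        // Completing the upload requires each part's number and ETag.
        _, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
            Bucket:   bucket,
            Key:      key,
            UploadId: create.UploadId,
            MultipartUpload: &s3.CompletedMultipartUpload{
                Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }

On failure paths a real caller would also issue AbortMultipartUpload so the uploaded parts do not keep accruing storage charges.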
// // You can optionally request server-side encryption where Amazon S3 encrypts // your data as it writes it to disks in its data centers and decrypts it for @@ -10615,7 +10615,7 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // match the headers you used in the request to initiate the upload by using // CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). // For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Server-side encryption is supported by the S3 Multipart Upload actions. Unless // you are using a customer-provided encryption key, you don't need to specify @@ -10731,10 +10731,10 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // The minimum allowable part size for a multipart upload is 5 MB. For more // information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // Instead of using an existing object as part data, you might use the UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action // and provide data in your request. // // You must initiate a multipart upload before you can upload any part. In response @@ -10745,15 +10745,15 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // * For conceptual information about multipart uploads, see Uploading Objects // Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. // // * For information about permissions required to use the multipart upload -// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon Simple Storage Service Developer Guide. +// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service User Guide. // -// * For information about copying objects using a single atomic operation -// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon Simple Storage Service Developer Guide. +// * For information about copying objects using a single atomic action vs. +// the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon Simple Storage Service User Guide. // // * For information about using server-side encryption with customer-provided // encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) @@ -10876,17 +10876,17 @@ type AbortMultipartUploadInput struct { // The bucket name to which the upload was taking place. // - // When using this API with an access point, you must direct requests to the - // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -12161,17 +12161,17 @@ type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. 
For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -12447,17 +12447,17 @@ type CopyObjectInput struct { // The name of the destination bucket. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -12470,8 +12470,8 @@ type CopyObjectInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with a COPY operation doesn’t affect bucket-level - // settings for S3 Bucket Key. + // Specifying this header with a COPY action doesn’t affect bucket-level settings + // for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. @@ -13364,17 +13364,17 @@ type CreateMultipartUploadInput struct { // The name of the bucket to which to initiate the upload // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -13387,7 +13387,7 @@ type CreateMultipartUploadInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with an object operation doesn’t affect bucket-level + // Specifying this header with an object action doesn’t affect bucket-level // settings for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` @@ -13485,7 +13485,7 @@ type CreateMultipartUploadInput struct { // object encryption. All GET and PUT requests for an object protected by AWS // KMS will fail if not made via SSL or using SigV4. For information about configuring // using any of the officially supported AWS SDKs and AWS CLI, see Specifying - // the Signature Version in Request Authentication (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 Developer Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` @@ -13778,17 +13778,17 @@ type CreateMultipartUploadOutput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. 
// - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -15476,17 +15476,17 @@ type DeleteObjectInput struct { // The bucket name of the bucket containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -15682,17 +15682,17 @@ type DeleteObjectTaggingInput struct { // The bucket name containing the objects from which to remove the tags. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -15833,17 +15833,17 @@ type DeleteObjectsInput struct { // The bucket name containing the objects to delete. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -15991,7 +15991,7 @@ type DeleteObjectsOutput struct { // was successfully deleted. Deleted []*DeletedObject `type:"list" flattened:"true"` - // Container for a failed delete operation that describes the object that Amazon + // Container for a failed delete action that describes the object that Amazon // S3 attempted to delete and the error it encountered. 
Errors []*Error `locationName:"Error" type:"list" flattened:"true"` @@ -16462,9 +16462,9 @@ type Error struct { // Forbidden SOAP Fault Code Prefix: Client // // * Code: AccountProblem Description: There is a problem with your AWS account - // that prevents the operation from completing successfully. Contact AWS - // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault - // Code Prefix: Client + // that prevents the action from completing successfully. Contact AWS Support + // for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault Code + // Prefix: Client // // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource // has been disabled. Contact AWS Support for further assistance. HTTP Status @@ -16567,9 +16567,9 @@ type Error struct { // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client // - // * Code: InvalidObjectState Description: The operation is not valid for - // the current state of the object. HTTP Status Code: 403 Forbidden SOAP - // Fault Code Prefix: Client + // * Code: InvalidObjectState Description: The action is not valid for the + // current state of the object. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client // // * Code: InvalidPart Description: One or more of the specified parts could // not be found. The part might not have been uploaded, or the specified @@ -16734,7 +16734,7 @@ type Error struct { // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status // Code: 403 Forbidden SOAP Fault Code Prefix: Client // - // * Code: OperationAborted Description: A conflicting conditional operation + // * Code: OperationAborted Description: A conflicting conditional action // is currently in progress against this resource. Try again. HTTP Status // Code: 409 Conflict SOAP Fault Code Prefix: Client // @@ -19353,11 +19353,11 @@ type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -19527,17 +19527,17 @@ type GetObjectInput struct { // The bucket name containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -19827,11 +19827,11 @@ type GetObjectLegalHoldInput struct { // The bucket name containing the object whose Legal Hold status you want to // retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -19982,11 +19982,11 @@ type GetObjectLockConfigurationInput struct { // The bucket whose Object Lock configuration you want to retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. 
For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -20183,7 +20183,7 @@ type GetObjectOutput struct { // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Provides information about object restoration operation and expiration time + // Provides information about object restoration action and expiration time // of the restored object copy. Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` @@ -20430,11 +20430,11 @@ type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to // retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -20585,17 +20585,17 @@ type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -21205,17 +21205,17 @@ type HeadBucketInput struct { // The bucket name. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -21320,17 +21320,17 @@ type HeadObjectInput struct { // The name of the bucket containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -23849,17 +23849,17 @@ type ListMultipartUploadsInput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -24206,7 +24206,7 @@ type ListObjectVersionsInput struct { KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. 
If additional keys satisfy the search criteria,
 	// but were not returned because max-keys was exceeded, the response contains
 	// <isTruncated>true</isTruncated>. To return the additional keys, see key-marker
@@ -24489,17 +24489,17 @@ type ListObjectsInput struct {
 	// The name of the bucket containing the objects.
 	//
-	// When using this API with an access point, you must direct requests to the
-	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
-	// When using this operation with an access point through the AWS SDKs, you
-	// provide the access point ARN in place of the bucket name. For more information
-	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// When using this action with an access point, you must direct requests to
+	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this action with an access point through the AWS SDKs, you provide
+	// the access point ARN in place of the bucket name. For more information about
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
 	// in the Amazon Simple Storage Service Developer Guide.
 	//
-	// When using this API with Amazon S3 on Outposts, you must direct requests
+	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
-	// using this operation using S3 on Outposts through the AWS SDKs, you provide
+	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
 	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
 	// in the Amazon Simple Storage Service Developer Guide.
@@ -24527,7 +24527,7 @@ type ListObjectsInput struct {
 	Marker *string `location:"querystring" locationName:"marker" type:"string"`
 
 	// Sets the maximum number of keys returned in the response. By default the
-	// API returns up to 1,000 key names. The response might contain fewer keys
+	// action returns up to 1,000 key names. The response might contain fewer keys
 	// but will never contain more.
 	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
@@ -24783,17 +24783,17 @@ type ListObjectsV2Input struct {
 	// Bucket name to list.
 	//
-	// When using this API with an access point, you must direct requests to the
-	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
-	// When using this operation with an access point through the AWS SDKs, you
-	// provide the access point ARN in place of the bucket name. For more information
-	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// When using this action with an access point, you must direct requests to
+	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this action with an access point through the AWS SDKs, you provide
+	// the access point ARN in place of the bucket name.
For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -24823,7 +24823,7 @@ type ListObjectsV2Input struct { FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` @@ -25013,23 +25013,23 @@ type ListObjectsV2Output struct { KeyCount *int64 `type:"integer"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `type:"integer"` // The bucket name. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. 
For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -25135,17 +25135,17 @@ type ListPartsInput struct { // The name of the bucket to which the parts are being uploaded. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -26975,7 +26975,7 @@ func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstr // S3 bucket. You can enable the configuration options in any combination. For // more information about when Amazon S3 considers a bucket or object public, // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. type PublicAccessBlockConfiguration struct { _ struct{} `type:"structure"` @@ -27480,7 +27480,7 @@ type PutBucketCorsInput struct { // Describes the cross-origin access configuration for objects in an Amazon // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon - // Simple Storage Service Developer Guide. + // Simple Storage Service User Guide. // // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -29549,11 +29549,11 @@ type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the // ACL. 
// - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -29588,19 +29588,19 @@ type PutObjectAclInput struct { // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Key for which the PUT operation was initiated. + // Key for which the PUT action was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -29798,19 +29798,19 @@ type PutObjectInput struct { // Object data. Body io.ReadSeeker `type:"blob"` - // The bucket name to which the PUT operation was initiated. + // The bucket name to which the PUT action was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -29823,8 +29823,8 @@ type PutObjectInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with a PUT operation doesn’t affect bucket-level - // settings for S3 Bucket Key. + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Can be used to specify caching behavior along the request/reply chain. For @@ -29891,7 +29891,7 @@ type PutObjectInput struct { // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Object key for which the PUT operation was initiated. + // Object key for which the PUT action was initiated. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -30257,11 +30257,11 @@ type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a Legal Hold // on. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -30687,17 +30687,17 @@ type PutObjectRetentionInput struct { // The bucket name that contains the object you want to apply this Object Retention // configuration to. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether this operation should bypass Governance-mode restrictions. + // Indicates whether this action should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` // The account id of the expected bucket owner. If the bucket is owned by a @@ -30863,17 +30863,17 @@ type PutObjectTaggingInput struct { // The bucket name containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -32051,17 +32051,17 @@ type RestoreObjectInput struct { // The bucket name containing the object to restore. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -32074,7 +32074,7 @@ type RestoreObjectInput struct { // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Object key for which the operation was initiated. + // Object key for which the action was initiated. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -32345,7 +32345,7 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { // Specifies the redirect behavior and when a redirect is applied. For more // information about routing rules, see Configuring advanced conditional redirects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. type RoutingRule struct { _ struct{} `type:"structure"` @@ -32453,7 +32453,7 @@ type Rule struct { // Specifies when an object transitions to a specified storage class. 
For more // information about Amazon S3 lifecycle configuration rules, see Transitioning // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon Simple Storage Service User Guide. Transition *Transition `type:"structure"` } @@ -33277,7 +33277,7 @@ type ServerSideEncryptionRule struct { // S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. // // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon Simple Storage Service User Guide. BucketKeyEnabled *bool `type:"boolean"` } @@ -33844,7 +33844,7 @@ type TopicConfiguration struct { // The Amazon S3 bucket event about which to send notifications. For more information, // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon Simple Storage Service User Guide. // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` @@ -33977,7 +33977,7 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep // Specifies when an object transitions to a specified storage class. For more // information about Amazon S3 lifecycle configuration rules, see Transitioning // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon Simple Storage Service User Guide. type Transition struct { _ struct{} `type:"structure"` @@ -34026,17 +34026,17 @@ type UploadPartCopyInput struct { // The bucket name. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. 
For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -34468,17 +34468,17 @@ type UploadPartInput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index f64b55135..6d3e726cf 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -48,13 +48,13 @@ const ( // ErrCodeObjectAlreadyInActiveTierError for service response error code // "ObjectAlreadyInActiveTierError". // - // This operation is not allowed against this storage tier. + // This action is not allowed against this storage tier. ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" // ErrCodeObjectNotInActiveTierError for service response error code // "ObjectNotInActiveTierError". // - // The source object of the COPY operation is not in the active tier and is - // only stored in Amazon S3 Glacier. + // The source object of the COPY action is not in the active tier and is only + // stored in Amazon S3 Glacier. 
ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go index 6cac26fa8..ba319d9a6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -23,19 +23,19 @@ type UploadInput struct { // The readable body payload to send to S3. Body io.Reader - // The bucket name to which the PUT operation was initiated. + // The bucket name to which the PUT action was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) // in the Amazon Simple Storage Service Developer Guide. @@ -48,8 +48,8 @@ type UploadInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with a PUT operation doesn’t affect bucket-level - // settings for S3 Bucket Key. + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Can be used to specify caching behavior along the request/reply chain. For @@ -111,7 +111,7 @@ type UploadInput struct { // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Object key for which the PUT operation was initiated. + // Object key for which the PUT action was initiated. 
// // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 60ffa48a2..87f9d1e35 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -627,6 +627,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo +echo "//go:build ${GOARCH} && ${GOOS}" echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 924775b82..9eb1fb633 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -111,7 +111,9 @@ func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeList, } return nil, err } - al := (*ProcThreadAttributeList)(unsafe.Pointer(&make([]byte, size)[0])) + const psize = unsafe.Sizeof(uintptr(0)) + // size is guaranteed to be ≥1 by InitializeProcThreadAttributeList. + al := (*ProcThreadAttributeList)(unsafe.Pointer(&make([]unsafe.Pointer, (size+psize-1)/psize)[0])) err = initializeProcThreadAttributeList(al, maxAttrCount, 0, &size) if err != nil { return nil, err @@ -120,8 +122,8 @@ func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeList, } // Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute. -func (al *ProcThreadAttributeList) Update(attribute uintptr, flags uint32, value unsafe.Pointer, size uintptr, prevValue uintptr, returnedSize *uintptr) error { - return updateProcThreadAttribute(al, flags, attribute, uintptr(value), size, prevValue, returnedSize) +func (al *ProcThreadAttributeList) Update(attribute uintptr, flags uint32, value unsafe.Pointer, size uintptr, prevValue unsafe.Pointer, returnedSize *uintptr) error { + return updateProcThreadAttribute(al, flags, attribute, value, size, prevValue, returnedSize) } // Delete frees ProcThreadAttributeList's resources. 
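The exec_windows.go hunk above reworks ProcThreadAttributeList handling: the list is now backed by a []unsafe.Pointer so the garbage collector keeps scanning it, and Update passes value and prevValue as unsafe.Pointer rather than uintptr. The following is a minimal caller-side sketch of the new signature, not part of this patch; it assumes the PROC_THREAD_ATTRIBUTE_PARENT_PROCESS constant exported by this package, and the attrListForParent helper name is hypothetical.

package main

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

// attrListForParent is a hypothetical helper: it builds a one-entry attribute
// list asking CreateProcess to treat parent as the new child's parent process.
func attrListForParent(parent windows.Handle) (*windows.ProcThreadAttributeList, error) {
	al, err := windows.NewProcThreadAttributeList(1)
	if err != nil {
		return nil, err
	}
	// value now crosses the call as unsafe.Pointer, so &parent stays visible
	// to the GC for the duration of the syscall instead of being laundered
	// through uintptr.
	err = al.Update(
		windows.PROC_THREAD_ATTRIBUTE_PARENT_PROCESS,
		0,                       // flags
		unsafe.Pointer(&parent), // value
		unsafe.Sizeof(parent),   // size
		nil,                     // prevValue, also unsafe.Pointer after this change
		nil,                     // returnedSize
	)
	if err != nil {
		al.Delete()
		return nil, err
	}
	return al, nil
}

As before, the list would then be handed to CreateProcess via StartupInfoEx.ProcThreadAttributeList and released with Delete; only the Update call site changes.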
diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash index 2163843a1..58e0188fb 100644 --- a/vendor/golang.org/x/sys/windows/mkerrors.bash +++ b/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -9,6 +9,8 @@ shopt -s nullglob winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" [[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } +ntstatus="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/ntstatus.h | sort -Vr | head -n 1)" +[[ -n $ntstatus ]] || { echo "Unable to find ntstatus.h" >&2; exit 1; } declare -A errors @@ -59,5 +61,10 @@ declare -A errors echo "$key $vtype = $value" done < "$winerror" + while read -r line; do + [[ $line =~ ^#define\ (STATUS_[^\s]+)\ +\(\(NTSTATUS\)((0x)?[0-9a-fA-F]+)L?\) ]] || continue + echo "${BASH_REMATCH[1]} NTStatus = ${BASH_REMATCH[2]}" + done < "$ntstatus" + echo ")" } | gofmt > "zerrors_windows.go" diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 69eb462c5..0e428ecbb 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -908,6 +908,19 @@ type SECURITY_DESCRIPTOR struct { dacl *ACL } +type SECURITY_QUALITY_OF_SERVICE struct { + Length uint32 + ImpersonationLevel uint32 + ContextTrackingMode byte + EffectiveOnly byte +} + +// Constants for the ContextTrackingMode field of SECURITY_QUALITY_OF_SERVICE. +const ( + SECURITY_STATIC_TRACKING = 0 + SECURITY_DYNAMIC_TRACKING = 1 +) + type SecurityAttributes struct { Length uint32 SecurityDescriptor *SECURITY_DESCRIPTOR diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 23238fad4..bb6aaf89e 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -8,6 +8,8 @@ package windows import ( errorspkg "errors" + "fmt" + "runtime" "sync" "syscall" "time" @@ -65,9 +67,8 @@ const ( LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 - // Return values of SleepEx and other APC functions - STATUS_USER_APC = 0x000000C0 - WAIT_IO_COMPLETION = STATUS_USER_APC + // Return value of SleepEx and other APC functions + WAIT_IO_COMPLETION = 0x000000C0 ) // StringToUTF16 is deprecated. Use UTF16FromString instead. @@ -180,6 +181,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process //sys IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2? 
//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW +//sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) +//sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState //sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) @@ -216,7 +222,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW //sys initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) = InitializeProcThreadAttributeList //sys deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) = DeleteProcThreadAttributeList -//sys updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value uintptr, size uintptr, prevvalue uintptr, returnedsize *uintptr) (err error) = UpdateProcThreadAttribute +//sys updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) = UpdateProcThreadAttribute //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId @@ -251,13 +257,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW //sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW //sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] +//sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) //sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) //sys FlushFileBuffers(handle Handle) (err error) //sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW //sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW //sys GetShortPathName(longpath 
*uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW //sys GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW -//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW +//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateFileMappingW //sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) //sys UnmapViewOfFile(addr uintptr) (err error) //sys FlushViewOfFile(addr uintptr, length uintptr) (err error) @@ -318,14 +325,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW //sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW //sys GetCurrentThreadId() (id uint32) -//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW -//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventW +//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventExW //sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW //sys SetEvent(event Handle) (err error) = kernel32.SetEvent //sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent //sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent -//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) = kernel32.CreateMutexW -//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateMutexExW +//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexW +//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexExW //sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW //sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex //sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx @@ -340,6 +347,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) //sys 
GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) //sys GetProcessId(process Handle) (id uint32, err error) +//sys QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) = kernel32.QueryFullProcessImageNameW //sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) @@ -375,16 +383,36 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 //sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid //sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree -//sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion -//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers +//sys CoInitializeEx(reserved uintptr, coInit uint32) (ret error) = ole32.CoInitializeEx +//sys CoUninitialize() = ole32.CoUninitialize +//sys CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) = ole32.CoGetObject //sys getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetProcessPreferredUILanguages //sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages //sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages //sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages +//sys findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) = kernel32.FindResourceW +//sys SizeofResource(module Handle, resInfo Handle) (size uint32, err error) = kernel32.SizeofResource +//sys LoadResource(module Handle, resInfo Handle) (resData Handle, err error) = kernel32.LoadResource +//sys LockResource(resData Handle) (addr uintptr, err error) = kernel32.LockResource // Process Status API (PSAPI) //sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +// NT Native APIs +//sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) = ntdll.RtlGetVersion +//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers +//sys RtlGetCurrentPeb() (peb *PEB) = ntdll.RtlGetCurrentPeb +//sys RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) = ntdll.RtlInitUnicodeString +//sys RtlInitString(destinationString *NTString, sourceString *byte) = ntdll.RtlInitString +//sys NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile +//sys NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, 
options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) = ntdll.NtCreateNamedPipeFile +//sys RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToNtPathName_U_WithStatus +//sys RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToRelativeNtPathName_U_WithStatus +//sys RtlDefaultNpAcl(acl **ACL) (ntstatus error) = ntdll.RtlDefaultNpAcl +//sys NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQueryInformationProcess +//sys NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) = ntdll.NtSetInformationProcess + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. @@ -792,6 +820,7 @@ const socket_error = uintptr(^uint32(0)) //sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo //sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes //sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW +//sys WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult //sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar @@ -1514,3 +1543,129 @@ func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } + +func (s NTStatus) Errno() syscall.Errno { + return rtlNtStatusToDosErrorNoTeb(s) +} + +func langID(pri, sub uint16) uint32 { return uint32(sub)<<10 | uint32(pri) } + +func (s NTStatus) Error() string { + b := make([]uint16, 300) + n, err := FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_FROM_HMODULE|FORMAT_MESSAGE_ARGUMENT_ARRAY, modntdll.Handle(), uint32(s), langID(LANG_ENGLISH, SUBLANG_ENGLISH_US), b, nil) + if err != nil { + return fmt.Sprintf("NTSTATUS 0x%08x", uint32(s)) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} + +// NewNTUnicodeString returns a new NTUnicodeString structure for use with native +// NT APIs that work over the NTUnicodeString type. Note that most Windows APIs +// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. 
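+//
+// A minimal sketch of intended use (the NT path below is illustrative only):
+//
+//	u, err := NewNTUnicodeString(`\??\C:\some\file.txt`)
+//	if err != nil {
+//		return err
+//	}
+//	oa := &OBJECT_ATTRIBUTES{ObjectName: u, Attributes: OBJ_CASE_INSENSITIVE}
+//	oa.Length = uint32(unsafe.Sizeof(*oa))
+//	// oa can now be passed to NtCreateFile.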
+func NewNTUnicodeString(s string) (*NTUnicodeString, error) { + var u NTUnicodeString + s16, err := UTF16PtrFromString(s) + if err != nil { + return nil, err + } + RtlInitUnicodeString(&u, s16) + return &u, nil +} + +// Slice returns a uint16 slice that aliases the data in the NTUnicodeString. +func (s *NTUnicodeString) Slice() []uint16 { + var slice []uint16 + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTUnicodeString) String() string { + return UTF16ToString(s.Slice()) +} + +// NewNTString returns a new NTString structure for use with native +// NT APIs that work over the NTString type. Note that most Windows APIs +// do not use NTString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTString(s string) (*NTString, error) { + var nts NTString + s8, err := BytePtrFromString(s) + if err != nil { + return nil, err + } + RtlInitString(&nts, s8) + return &nts, nil +} + +// Slice returns a byte slice that aliases the data in the NTString. +func (s *NTString) Slice() []byte { + var slice []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTString) String() string { + return ByteSliceToString(s.Slice()) +} + +// FindResource resolves a resource of the given name and resource type. +func FindResource(module Handle, name, resType ResourceIDOrString) (Handle, error) { + var namePtr, resTypePtr uintptr + var name16, resType16 *uint16 + var err error + resolvePtr := func(i interface{}, keep **uint16) (uintptr, error) { + switch v := i.(type) { + case string: + *keep, err = UTF16PtrFromString(v) + if err != nil { + return 0, err + } + return uintptr(unsafe.Pointer(*keep)), nil + case ResourceID: + return uintptr(v), nil + } + return 0, errorspkg.New("parameter must be a ResourceID or a string") + } + namePtr, err = resolvePtr(name, &name16) + if err != nil { + return 0, err + } + resTypePtr, err = resolvePtr(resType, &resType16) + if err != nil { + return 0, err + } + resInfo, err := findResource(module, namePtr, resTypePtr) + runtime.KeepAlive(name16) + runtime.KeepAlive(resType16) + return resInfo, err +} + +func LoadResourceData(module, resInfo Handle) (data []byte, err error) { + size, err := SizeofResource(module, resInfo) + if err != nil { + return + } + resData, err := LoadResource(module, resInfo) + if err != nil { + return + } + ptr, err := LockResource(resData) + if err != nil { + return + } + h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) + h.Data = unsafe.Pointer(ptr) + h.Len = int(size) + h.Cap = int(size) + return +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index c26088985..369cc91da 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -10,6 +10,10 @@ import ( "unsafe" ) +// NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and +// other native functions. +type NTStatus uint32 + const ( // Invented values to support what package os expects. O_RDONLY = 0x00000 @@ -908,7 +912,11 @@ type StartupInfoEx struct { // To create a *ProcThreadAttributeList, use NewProcThreadAttributeList, and // free its memory using ProcThreadAttributeList.Delete. 
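+//
+// A minimal sketch of that lifecycle (the commented-out attribute update is
+// illustrative and assumes this package's Update helper):
+//
+//	al, err := NewProcThreadAttributeList(1)
+//	if err != nil {
+//		return err
+//	}
+//	defer al.Delete()
+//	// e.g. al.Update(PROC_THREAD_ATTRIBUTE_PARENT_PROCESS, 0,
+//	//	unsafe.Pointer(&parent), unsafe.Sizeof(parent), nil, nil)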
type ProcThreadAttributeList struct { - _ [1]byte + // This is of type unsafe.Pointer, not of type byte or uintptr, because + // the contents of it is mostly a list of pointers, and in most cases, + // that's a list of pointers to Go-allocated objects. In order to keep + // the GC from collecting these objects, we declare this as unsafe.Pointer. + _ [1]unsafe.Pointer } type ProcessInformation struct { @@ -2268,3 +2276,496 @@ type CommTimeouts struct { WriteTotalTimeoutMultiplier uint32 WriteTotalTimeoutConstant uint32 } + +// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING. +type NTUnicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +// NTString is an ANSI string for NT native APIs, corresponding to STRING. +type NTString struct { + Length uint16 + MaximumLength uint16 + Buffer *byte +} + +type LIST_ENTRY struct { + Flink *LIST_ENTRY + Blink *LIST_ENTRY +} + +type LDR_DATA_TABLE_ENTRY struct { + reserved1 [2]uintptr + InMemoryOrderLinks LIST_ENTRY + reserved2 [2]uintptr + DllBase uintptr + reserved3 [2]uintptr + FullDllName NTUnicodeString + reserved4 [8]byte + reserved5 [3]uintptr + reserved6 uintptr + TimeDateStamp uint32 +} + +type PEB_LDR_DATA struct { + reserved1 [8]byte + reserved2 [3]uintptr + InMemoryOrderModuleList LIST_ENTRY +} + +type CURDIR struct { + DosPath NTUnicodeString + Handle Handle +} + +type RTL_DRIVE_LETTER_CURDIR struct { + Flags uint16 + Length uint16 + TimeStamp uint32 + DosPath NTString +} + +type RTL_USER_PROCESS_PARAMETERS struct { + MaximumLength, Length uint32 + + Flags, DebugFlags uint32 + + ConsoleHandle Handle + ConsoleFlags uint32 + StandardInput, StandardOutput, StandardError Handle + + CurrentDirectory CURDIR + DllPath NTUnicodeString + ImagePathName NTUnicodeString + CommandLine NTUnicodeString + Environment unsafe.Pointer + + StartingX, StartingY, CountX, CountY, CountCharsX, CountCharsY, FillAttribute uint32 + + WindowFlags, ShowWindowFlags uint32 + WindowTitle, DesktopInfo, ShellInfo, RuntimeData NTUnicodeString + CurrentDirectories [32]RTL_DRIVE_LETTER_CURDIR + + EnvironmentSize, EnvironmentVersion uintptr + + PackageDependencyData unsafe.Pointer + ProcessGroupId uint32 + LoaderThreads uint32 + + RedirectionDllName NTUnicodeString + HeapPartitionName NTUnicodeString + DefaultThreadpoolCpuSetMasks uintptr + DefaultThreadpoolCpuSetMaskCount uint32 +} + +type PEB struct { + reserved1 [2]byte + BeingDebugged byte + BitField byte + reserved3 uintptr + ImageBaseAddress uintptr + Ldr *PEB_LDR_DATA + ProcessParameters *RTL_USER_PROCESS_PARAMETERS + reserved4 [3]uintptr + AtlThunkSListPtr uintptr + reserved5 uintptr + reserved6 uint32 + reserved7 uintptr + reserved8 uint32 + AtlThunkSListPtr32 uint32 + reserved9 [45]uintptr + reserved10 [96]byte + PostProcessInitRoutine uintptr + reserved11 [128]byte + reserved12 [1]uintptr + SessionId uint32 +} + +type OBJECT_ATTRIBUTES struct { + Length uint32 + RootDirectory Handle + ObjectName *NTUnicodeString + Attributes uint32 + SecurityDescriptor *SECURITY_DESCRIPTOR + SecurityQoS *SECURITY_QUALITY_OF_SERVICE +} + +// Values for the Attributes member of OBJECT_ATTRIBUTES. 
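+// The flags may be OR'd together, e.g. OBJ_CASE_INSENSITIVE|OBJ_KERNEL_HANDLE;
+// OBJ_VALID_ATTRIBUTES is the mask of all bits defined here.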
+const ( + OBJ_INHERIT = 0x00000002 + OBJ_PERMANENT = 0x00000010 + OBJ_EXCLUSIVE = 0x00000020 + OBJ_CASE_INSENSITIVE = 0x00000040 + OBJ_OPENIF = 0x00000080 + OBJ_OPENLINK = 0x00000100 + OBJ_KERNEL_HANDLE = 0x00000200 + OBJ_FORCE_ACCESS_CHECK = 0x00000400 + OBJ_IGNORE_IMPERSONATED_DEVICEMAP = 0x00000800 + OBJ_DONT_REPARSE = 0x00001000 + OBJ_VALID_ATTRIBUTES = 0x00001FF2 +) + +type IO_STATUS_BLOCK struct { + Status NTStatus + Information uintptr +} + +type RTLP_CURDIR_REF struct { + RefCount int32 + Handle Handle +} + +type RTL_RELATIVE_NAME struct { + RelativeName NTUnicodeString + ContainingDirectory Handle + CurDirRef *RTLP_CURDIR_REF +} + +const ( + // CreateDisposition flags for NtCreateFile and NtCreateNamedPipeFile. + FILE_SUPERSEDE = 0x00000000 + FILE_OPEN = 0x00000001 + FILE_CREATE = 0x00000002 + FILE_OPEN_IF = 0x00000003 + FILE_OVERWRITE = 0x00000004 + FILE_OVERWRITE_IF = 0x00000005 + FILE_MAXIMUM_DISPOSITION = 0x00000005 + + // CreateOptions flags for NtCreateFile and NtCreateNamedPipeFile. + FILE_DIRECTORY_FILE = 0x00000001 + FILE_WRITE_THROUGH = 0x00000002 + FILE_SEQUENTIAL_ONLY = 0x00000004 + FILE_NO_INTERMEDIATE_BUFFERING = 0x00000008 + FILE_SYNCHRONOUS_IO_ALERT = 0x00000010 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_NON_DIRECTORY_FILE = 0x00000040 + FILE_CREATE_TREE_CONNECTION = 0x00000080 + FILE_COMPLETE_IF_OPLOCKED = 0x00000100 + FILE_NO_EA_KNOWLEDGE = 0x00000200 + FILE_OPEN_REMOTE_INSTANCE = 0x00000400 + FILE_RANDOM_ACCESS = 0x00000800 + FILE_DELETE_ON_CLOSE = 0x00001000 + FILE_OPEN_BY_FILE_ID = 0x00002000 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_NO_COMPRESSION = 0x00008000 + FILE_OPEN_REQUIRING_OPLOCK = 0x00010000 + FILE_DISALLOW_EXCLUSIVE = 0x00020000 + FILE_RESERVE_OPFILTER = 0x00100000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + FILE_OPEN_NO_RECALL = 0x00400000 + FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x00800000 + + // Parameter constants for NtCreateNamedPipeFile. + + FILE_PIPE_BYTE_STREAM_TYPE = 0x00000000 + FILE_PIPE_MESSAGE_TYPE = 0x00000001 + + FILE_PIPE_ACCEPT_REMOTE_CLIENTS = 0x00000000 + FILE_PIPE_REJECT_REMOTE_CLIENTS = 0x00000002 + + FILE_PIPE_TYPE_VALID_MASK = 0x00000003 + + FILE_PIPE_BYTE_STREAM_MODE = 0x00000000 + FILE_PIPE_MESSAGE_MODE = 0x00000001 + + FILE_PIPE_QUEUE_OPERATION = 0x00000000 + FILE_PIPE_COMPLETE_OPERATION = 0x00000001 + + FILE_PIPE_INBOUND = 0x00000000 + FILE_PIPE_OUTBOUND = 0x00000001 + FILE_PIPE_FULL_DUPLEX = 0x00000002 + + FILE_PIPE_DISCONNECTED_STATE = 0x00000001 + FILE_PIPE_LISTENING_STATE = 0x00000002 + FILE_PIPE_CONNECTED_STATE = 0x00000003 + FILE_PIPE_CLOSING_STATE = 0x00000004 + + FILE_PIPE_CLIENT_END = 0x00000000 + FILE_PIPE_SERVER_END = 0x00000001 +) + +// ProcessInformationClasses for NtQueryInformationProcess and NtSetInformationProcess. 
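+//
+// A minimal sketch of querying the current process (error handling elided):
+//
+//	var pbi PROCESS_BASIC_INFORMATION
+//	var retLen uint32
+//	err := NtQueryInformationProcess(CurrentProcess(), ProcessBasicInformation,
+//		unsafe.Pointer(&pbi), uint32(unsafe.Sizeof(pbi)), &retLen)
+//	// On success, pbi.PebBaseAddress points at the process PEB.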
+const ( + ProcessBasicInformation = iota + ProcessQuotaLimits + ProcessIoCounters + ProcessVmCounters + ProcessTimes + ProcessBasePriority + ProcessRaisePriority + ProcessDebugPort + ProcessExceptionPort + ProcessAccessToken + ProcessLdtInformation + ProcessLdtSize + ProcessDefaultHardErrorMode + ProcessIoPortHandlers + ProcessPooledUsageAndLimits + ProcessWorkingSetWatch + ProcessUserModeIOPL + ProcessEnableAlignmentFaultFixup + ProcessPriorityClass + ProcessWx86Information + ProcessHandleCount + ProcessAffinityMask + ProcessPriorityBoost + ProcessDeviceMap + ProcessSessionInformation + ProcessForegroundInformation + ProcessWow64Information + ProcessImageFileName + ProcessLUIDDeviceMapsEnabled + ProcessBreakOnTermination + ProcessDebugObjectHandle + ProcessDebugFlags + ProcessHandleTracing + ProcessIoPriority + ProcessExecuteFlags + ProcessTlsInformation + ProcessCookie + ProcessImageInformation + ProcessCycleTime + ProcessPagePriority + ProcessInstrumentationCallback + ProcessThreadStackAllocation + ProcessWorkingSetWatchEx + ProcessImageFileNameWin32 + ProcessImageFileMapping + ProcessAffinityUpdateMode + ProcessMemoryAllocationMode + ProcessGroupInformation + ProcessTokenVirtualizationEnabled + ProcessConsoleHostProcess + ProcessWindowInformation + ProcessHandleInformation + ProcessMitigationPolicy + ProcessDynamicFunctionTableInformation + ProcessHandleCheckingMode + ProcessKeepAliveCount + ProcessRevokeFileHandles + ProcessWorkingSetControl + ProcessHandleTable + ProcessCheckStackExtentsMode + ProcessCommandLineInformation + ProcessProtectionInformation + ProcessMemoryExhaustion + ProcessFaultInformation + ProcessTelemetryIdInformation + ProcessCommitReleaseInformation + ProcessDefaultCpuSetsInformation + ProcessAllowedCpuSetsInformation + ProcessSubsystemProcess + ProcessJobMemoryInformation + ProcessInPrivate + ProcessRaiseUMExceptionOnInvalidHandleClose + ProcessIumChallengeResponse + ProcessChildProcessInformation + ProcessHighGraphicsPriorityInformation + ProcessSubsystemInformation + ProcessEnergyValues + ProcessActivityThrottleState + ProcessActivityThrottlePolicy + ProcessWin32kSyscallFilterInformation + ProcessDisableSystemAllowedCpuSets + ProcessWakeInformation + ProcessEnergyTrackingState + ProcessManageWritesToExecutableMemory + ProcessCaptureTrustletLiveDump + ProcessTelemetryCoverage + ProcessEnclaveInformation + ProcessEnableReadWriteVmLogging + ProcessUptimeInformation + ProcessImageSection + ProcessDebugAuthInformation + ProcessSystemResourceManagement + ProcessSequenceNumber + ProcessLoaderDetour + ProcessSecurityDomainInformation + ProcessCombineSecurityDomainsInformation + ProcessEnableLogging + ProcessLeapSecondInformation + ProcessFiberShadowStackAllocation + ProcessFreeFiberShadowStackAllocation + ProcessAltSystemCallInformation + ProcessDynamicEHContinuationTargets + ProcessDynamicEnforcedCetCompatibleRanges +) + +type PROCESS_BASIC_INFORMATION struct { + ExitStatus NTStatus + PebBaseAddress *PEB + AffinityMask uintptr + BasePriority int32 + UniqueProcessId uintptr + InheritedFromUniqueProcessId uintptr +} + +// Constants for LocalAlloc flags. +const ( + LMEM_FIXED = 0x0 + LMEM_MOVEABLE = 0x2 + LMEM_NOCOMPACT = 0x10 + LMEM_NODISCARD = 0x20 + LMEM_ZEROINIT = 0x40 + LMEM_MODIFY = 0x80 + LMEM_DISCARDABLE = 0xf00 + LMEM_VALID_FLAGS = 0xf72 + LMEM_INVALID_HANDLE = 0x8000 + LHND = LMEM_MOVEABLE | LMEM_ZEROINIT + LPTR = LMEM_FIXED | LMEM_ZEROINIT + NONZEROLHND = LMEM_MOVEABLE + NONZEROLPTR = LMEM_FIXED +) + +// Constants for the CreateNamedPipe-family of functions. 
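+//
+// A rough sketch of creating a message-mode duplex pipe with these flags
+// (the pipe name is illustrative only):
+//
+//	name, _ := UTF16PtrFromString(`\\.\pipe\example`)
+//	h, err := CreateNamedPipe(name,
+//		PIPE_ACCESS_DUPLEX,
+//		PIPE_TYPE_MESSAGE|PIPE_READMODE_MESSAGE|PIPE_WAIT,
+//		PIPE_UNLIMITED_INSTANCES, 4096, 4096, 0, nil)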
+const ( + PIPE_ACCESS_INBOUND = 0x1 + PIPE_ACCESS_OUTBOUND = 0x2 + PIPE_ACCESS_DUPLEX = 0x3 + + PIPE_CLIENT_END = 0x0 + PIPE_SERVER_END = 0x1 + + PIPE_WAIT = 0x0 + PIPE_NOWAIT = 0x1 + PIPE_READMODE_BYTE = 0x0 + PIPE_READMODE_MESSAGE = 0x2 + PIPE_TYPE_BYTE = 0x0 + PIPE_TYPE_MESSAGE = 0x4 + PIPE_ACCEPT_REMOTE_CLIENTS = 0x0 + PIPE_REJECT_REMOTE_CLIENTS = 0x8 + + PIPE_UNLIMITED_INSTANCES = 255 +) + +// Constants for security attributes when opening named pipes. +const ( + SECURITY_ANONYMOUS = SecurityAnonymous << 16 + SECURITY_IDENTIFICATION = SecurityIdentification << 16 + SECURITY_IMPERSONATION = SecurityImpersonation << 16 + SECURITY_DELEGATION = SecurityDelegation << 16 + + SECURITY_CONTEXT_TRACKING = 0x40000 + SECURITY_EFFECTIVE_ONLY = 0x80000 + + SECURITY_SQOS_PRESENT = 0x100000 + SECURITY_VALID_SQOS_FLAGS = 0x1f0000 +) + +// ResourceID represents a 16-bit resource identifier, traditionally created with the MAKEINTRESOURCE macro. +type ResourceID uint16 + +// ResourceIDOrString must be either a ResourceID, to specify a resource or resource type by ID, +// or a string, to specify a resource or resource type by name. +type ResourceIDOrString interface{} + +// Predefined resource names and types. +var ( + // Predefined names. + CREATEPROCESS_MANIFEST_RESOURCE_ID ResourceID = 1 + ISOLATIONAWARE_MANIFEST_RESOURCE_ID ResourceID = 2 + ISOLATIONAWARE_NOSTATICIMPORT_MANIFEST_RESOURCE_ID ResourceID = 3 + ISOLATIONPOLICY_MANIFEST_RESOURCE_ID ResourceID = 4 + ISOLATIONPOLICY_BROWSER_MANIFEST_RESOURCE_ID ResourceID = 5 + MINIMUM_RESERVED_MANIFEST_RESOURCE_ID ResourceID = 1 // inclusive + MAXIMUM_RESERVED_MANIFEST_RESOURCE_ID ResourceID = 16 // inclusive + + // Predefined types. + RT_CURSOR ResourceID = 1 + RT_BITMAP ResourceID = 2 + RT_ICON ResourceID = 3 + RT_MENU ResourceID = 4 + RT_DIALOG ResourceID = 5 + RT_STRING ResourceID = 6 + RT_FONTDIR ResourceID = 7 + RT_FONT ResourceID = 8 + RT_ACCELERATOR ResourceID = 9 + RT_RCDATA ResourceID = 10 + RT_MESSAGETABLE ResourceID = 11 + RT_GROUP_CURSOR ResourceID = 12 + RT_GROUP_ICON ResourceID = 14 + RT_VERSION ResourceID = 16 + RT_DLGINCLUDE ResourceID = 17 + RT_PLUGPLAY ResourceID = 19 + RT_VXD ResourceID = 20 + RT_ANICURSOR ResourceID = 21 + RT_ANIICON ResourceID = 22 + RT_HTML ResourceID = 23 + RT_MANIFEST ResourceID = 24 +) + +type COAUTHIDENTITY struct { + User *uint16 + UserLength uint32 + Domain *uint16 + DomainLength uint32 + Password *uint16 + PasswordLength uint32 + Flags uint32 +} + +type COAUTHINFO struct { + AuthnSvc uint32 + AuthzSvc uint32 + ServerPrincName *uint16 + AuthnLevel uint32 + ImpersonationLevel uint32 + AuthIdentityData *COAUTHIDENTITY + Capabilities uint32 +} + +type COSERVERINFO struct { + Reserved1 uint32 + Aame *uint16 + AuthInfo *COAUTHINFO + Reserved2 uint32 +} + +type BIND_OPTS3 struct { + CbStruct uint32 + Flags uint32 + Mode uint32 + TickCountDeadline uint32 + TrackFlags uint32 + ClassContext uint32 + Locale uint32 + ServerInfo *COSERVERINFO + Hwnd HWND +} + +const ( + CLSCTX_INPROC_SERVER = 0x1 + CLSCTX_INPROC_HANDLER = 0x2 + CLSCTX_LOCAL_SERVER = 0x4 + CLSCTX_INPROC_SERVER16 = 0x8 + CLSCTX_REMOTE_SERVER = 0x10 + CLSCTX_INPROC_HANDLER16 = 0x20 + CLSCTX_RESERVED1 = 0x40 + CLSCTX_RESERVED2 = 0x80 + CLSCTX_RESERVED3 = 0x100 + CLSCTX_RESERVED4 = 0x200 + CLSCTX_NO_CODE_DOWNLOAD = 0x400 + CLSCTX_RESERVED5 = 0x800 + CLSCTX_NO_CUSTOM_MARSHAL = 0x1000 + CLSCTX_ENABLE_CODE_DOWNLOAD = 0x2000 + CLSCTX_NO_FAILURE_LOG = 0x4000 + CLSCTX_DISABLE_AAA = 0x8000 + CLSCTX_ENABLE_AAA = 0x10000 + CLSCTX_FROM_DEFAULT_CONTEXT = 0x20000 + 
CLSCTX_ACTIVATE_32_BIT_SERVER = 0x40000 + CLSCTX_ACTIVATE_64_BIT_SERVER = 0x80000 + CLSCTX_ENABLE_CLOAKING = 0x100000 + CLSCTX_APPCONTAINER = 0x400000 + CLSCTX_ACTIVATE_AAA_AS_IU = 0x800000 + CLSCTX_PS_DLL = 0x80000000 + + COINIT_MULTITHREADED = 0x0 + COINIT_APARTMENTTHREADED = 0x2 + COINIT_DISABLE_OLE1DDE = 0x4 + COINIT_SPEED_OVER_MEMORY = 0x8 +) + +// Flag for QueryFullProcessImageName. +const PROCESS_NAME_NATIVE = 1 diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go index 562e7fc31..0cf658fbd 100644 --- a/vendor/golang.org/x/sys/windows/zerrors_windows.go +++ b/vendor/golang.org/x/sys/windows/zerrors_windows.go @@ -6895,4 +6895,2574 @@ const ( WINML_ERR_INVALID_BINDING Handle = 0x88900002 WINML_ERR_VALUE_NOTFOUND Handle = 0x88900003 WINML_ERR_SIZE_MISMATCH Handle = 0x88900004 + STATUS_WAIT_0 NTStatus = 0x00000000 + STATUS_SUCCESS NTStatus = 0x00000000 + STATUS_WAIT_1 NTStatus = 0x00000001 + STATUS_WAIT_2 NTStatus = 0x00000002 + STATUS_WAIT_3 NTStatus = 0x00000003 + STATUS_WAIT_63 NTStatus = 0x0000003F + STATUS_ABANDONED NTStatus = 0x00000080 + STATUS_ABANDONED_WAIT_0 NTStatus = 0x00000080 + STATUS_ABANDONED_WAIT_63 NTStatus = 0x000000BF + STATUS_USER_APC NTStatus = 0x000000C0 + STATUS_ALREADY_COMPLETE NTStatus = 0x000000FF + STATUS_KERNEL_APC NTStatus = 0x00000100 + STATUS_ALERTED NTStatus = 0x00000101 + STATUS_TIMEOUT NTStatus = 0x00000102 + STATUS_PENDING NTStatus = 0x00000103 + STATUS_REPARSE NTStatus = 0x00000104 + STATUS_MORE_ENTRIES NTStatus = 0x00000105 + STATUS_NOT_ALL_ASSIGNED NTStatus = 0x00000106 + STATUS_SOME_NOT_MAPPED NTStatus = 0x00000107 + STATUS_OPLOCK_BREAK_IN_PROGRESS NTStatus = 0x00000108 + STATUS_VOLUME_MOUNTED NTStatus = 0x00000109 + STATUS_RXACT_COMMITTED NTStatus = 0x0000010A + STATUS_NOTIFY_CLEANUP NTStatus = 0x0000010B + STATUS_NOTIFY_ENUM_DIR NTStatus = 0x0000010C + STATUS_NO_QUOTAS_FOR_ACCOUNT NTStatus = 0x0000010D + STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED NTStatus = 0x0000010E + STATUS_PAGE_FAULT_TRANSITION NTStatus = 0x00000110 + STATUS_PAGE_FAULT_DEMAND_ZERO NTStatus = 0x00000111 + STATUS_PAGE_FAULT_COPY_ON_WRITE NTStatus = 0x00000112 + STATUS_PAGE_FAULT_GUARD_PAGE NTStatus = 0x00000113 + STATUS_PAGE_FAULT_PAGING_FILE NTStatus = 0x00000114 + STATUS_CACHE_PAGE_LOCKED NTStatus = 0x00000115 + STATUS_CRASH_DUMP NTStatus = 0x00000116 + STATUS_BUFFER_ALL_ZEROS NTStatus = 0x00000117 + STATUS_REPARSE_OBJECT NTStatus = 0x00000118 + STATUS_RESOURCE_REQUIREMENTS_CHANGED NTStatus = 0x00000119 + STATUS_TRANSLATION_COMPLETE NTStatus = 0x00000120 + STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY NTStatus = 0x00000121 + STATUS_NOTHING_TO_TERMINATE NTStatus = 0x00000122 + STATUS_PROCESS_NOT_IN_JOB NTStatus = 0x00000123 + STATUS_PROCESS_IN_JOB NTStatus = 0x00000124 + STATUS_VOLSNAP_HIBERNATE_READY NTStatus = 0x00000125 + STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY NTStatus = 0x00000126 + STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED NTStatus = 0x00000127 + STATUS_INTERRUPT_STILL_CONNECTED NTStatus = 0x00000128 + STATUS_PROCESS_CLONED NTStatus = 0x00000129 + STATUS_FILE_LOCKED_WITH_ONLY_READERS NTStatus = 0x0000012A + STATUS_FILE_LOCKED_WITH_WRITERS NTStatus = 0x0000012B + STATUS_VALID_IMAGE_HASH NTStatus = 0x0000012C + STATUS_VALID_CATALOG_HASH NTStatus = 0x0000012D + STATUS_VALID_STRONG_CODE_HASH NTStatus = 0x0000012E + STATUS_GHOSTED NTStatus = 0x0000012F + STATUS_DATA_OVERWRITTEN NTStatus = 0x00000130 + STATUS_RESOURCEMANAGER_READ_ONLY NTStatus = 0x00000202 + STATUS_RING_PREVIOUSLY_EMPTY NTStatus = 0x00000210 + 
STATUS_RING_PREVIOUSLY_FULL NTStatus = 0x00000211 + STATUS_RING_PREVIOUSLY_ABOVE_QUOTA NTStatus = 0x00000212 + STATUS_RING_NEWLY_EMPTY NTStatus = 0x00000213 + STATUS_RING_SIGNAL_OPPOSITE_ENDPOINT NTStatus = 0x00000214 + STATUS_OPLOCK_SWITCHED_TO_NEW_HANDLE NTStatus = 0x00000215 + STATUS_OPLOCK_HANDLE_CLOSED NTStatus = 0x00000216 + STATUS_WAIT_FOR_OPLOCK NTStatus = 0x00000367 + STATUS_REPARSE_GLOBAL NTStatus = 0x00000368 + STATUS_FLT_IO_COMPLETE NTStatus = 0x001C0001 + STATUS_OBJECT_NAME_EXISTS NTStatus = 0x40000000 + STATUS_THREAD_WAS_SUSPENDED NTStatus = 0x40000001 + STATUS_WORKING_SET_LIMIT_RANGE NTStatus = 0x40000002 + STATUS_IMAGE_NOT_AT_BASE NTStatus = 0x40000003 + STATUS_RXACT_STATE_CREATED NTStatus = 0x40000004 + STATUS_SEGMENT_NOTIFICATION NTStatus = 0x40000005 + STATUS_LOCAL_USER_SESSION_KEY NTStatus = 0x40000006 + STATUS_BAD_CURRENT_DIRECTORY NTStatus = 0x40000007 + STATUS_SERIAL_MORE_WRITES NTStatus = 0x40000008 + STATUS_REGISTRY_RECOVERED NTStatus = 0x40000009 + STATUS_FT_READ_RECOVERY_FROM_BACKUP NTStatus = 0x4000000A + STATUS_FT_WRITE_RECOVERY NTStatus = 0x4000000B + STATUS_SERIAL_COUNTER_TIMEOUT NTStatus = 0x4000000C + STATUS_NULL_LM_PASSWORD NTStatus = 0x4000000D + STATUS_IMAGE_MACHINE_TYPE_MISMATCH NTStatus = 0x4000000E + STATUS_RECEIVE_PARTIAL NTStatus = 0x4000000F + STATUS_RECEIVE_EXPEDITED NTStatus = 0x40000010 + STATUS_RECEIVE_PARTIAL_EXPEDITED NTStatus = 0x40000011 + STATUS_EVENT_DONE NTStatus = 0x40000012 + STATUS_EVENT_PENDING NTStatus = 0x40000013 + STATUS_CHECKING_FILE_SYSTEM NTStatus = 0x40000014 + STATUS_FATAL_APP_EXIT NTStatus = 0x40000015 + STATUS_PREDEFINED_HANDLE NTStatus = 0x40000016 + STATUS_WAS_UNLOCKED NTStatus = 0x40000017 + STATUS_SERVICE_NOTIFICATION NTStatus = 0x40000018 + STATUS_WAS_LOCKED NTStatus = 0x40000019 + STATUS_LOG_HARD_ERROR NTStatus = 0x4000001A + STATUS_ALREADY_WIN32 NTStatus = 0x4000001B + STATUS_WX86_UNSIMULATE NTStatus = 0x4000001C + STATUS_WX86_CONTINUE NTStatus = 0x4000001D + STATUS_WX86_SINGLE_STEP NTStatus = 0x4000001E + STATUS_WX86_BREAKPOINT NTStatus = 0x4000001F + STATUS_WX86_EXCEPTION_CONTINUE NTStatus = 0x40000020 + STATUS_WX86_EXCEPTION_LASTCHANCE NTStatus = 0x40000021 + STATUS_WX86_EXCEPTION_CHAIN NTStatus = 0x40000022 + STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE NTStatus = 0x40000023 + STATUS_NO_YIELD_PERFORMED NTStatus = 0x40000024 + STATUS_TIMER_RESUME_IGNORED NTStatus = 0x40000025 + STATUS_ARBITRATION_UNHANDLED NTStatus = 0x40000026 + STATUS_CARDBUS_NOT_SUPPORTED NTStatus = 0x40000027 + STATUS_WX86_CREATEWX86TIB NTStatus = 0x40000028 + STATUS_MP_PROCESSOR_MISMATCH NTStatus = 0x40000029 + STATUS_HIBERNATED NTStatus = 0x4000002A + STATUS_RESUME_HIBERNATION NTStatus = 0x4000002B + STATUS_FIRMWARE_UPDATED NTStatus = 0x4000002C + STATUS_DRIVERS_LEAKING_LOCKED_PAGES NTStatus = 0x4000002D + STATUS_MESSAGE_RETRIEVED NTStatus = 0x4000002E + STATUS_SYSTEM_POWERSTATE_TRANSITION NTStatus = 0x4000002F + STATUS_ALPC_CHECK_COMPLETION_LIST NTStatus = 0x40000030 + STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION NTStatus = 0x40000031 + STATUS_ACCESS_AUDIT_BY_POLICY NTStatus = 0x40000032 + STATUS_ABANDON_HIBERFILE NTStatus = 0x40000033 + STATUS_BIZRULES_NOT_ENABLED NTStatus = 0x40000034 + STATUS_FT_READ_FROM_COPY NTStatus = 0x40000035 + STATUS_IMAGE_AT_DIFFERENT_BASE NTStatus = 0x40000036 + STATUS_PATCH_DEFERRED NTStatus = 0x40000037 + STATUS_HEURISTIC_DAMAGE_POSSIBLE NTStatus = 0x40190001 + STATUS_GUARD_PAGE_VIOLATION NTStatus = 0x80000001 + STATUS_DATATYPE_MISALIGNMENT NTStatus = 0x80000002 + STATUS_BREAKPOINT NTStatus = 0x80000003 + 
STATUS_SINGLE_STEP NTStatus = 0x80000004 + STATUS_BUFFER_OVERFLOW NTStatus = 0x80000005 + STATUS_NO_MORE_FILES NTStatus = 0x80000006 + STATUS_WAKE_SYSTEM_DEBUGGER NTStatus = 0x80000007 + STATUS_HANDLES_CLOSED NTStatus = 0x8000000A + STATUS_NO_INHERITANCE NTStatus = 0x8000000B + STATUS_GUID_SUBSTITUTION_MADE NTStatus = 0x8000000C + STATUS_PARTIAL_COPY NTStatus = 0x8000000D + STATUS_DEVICE_PAPER_EMPTY NTStatus = 0x8000000E + STATUS_DEVICE_POWERED_OFF NTStatus = 0x8000000F + STATUS_DEVICE_OFF_LINE NTStatus = 0x80000010 + STATUS_DEVICE_BUSY NTStatus = 0x80000011 + STATUS_NO_MORE_EAS NTStatus = 0x80000012 + STATUS_INVALID_EA_NAME NTStatus = 0x80000013 + STATUS_EA_LIST_INCONSISTENT NTStatus = 0x80000014 + STATUS_INVALID_EA_FLAG NTStatus = 0x80000015 + STATUS_VERIFY_REQUIRED NTStatus = 0x80000016 + STATUS_EXTRANEOUS_INFORMATION NTStatus = 0x80000017 + STATUS_RXACT_COMMIT_NECESSARY NTStatus = 0x80000018 + STATUS_NO_MORE_ENTRIES NTStatus = 0x8000001A + STATUS_FILEMARK_DETECTED NTStatus = 0x8000001B + STATUS_MEDIA_CHANGED NTStatus = 0x8000001C + STATUS_BUS_RESET NTStatus = 0x8000001D + STATUS_END_OF_MEDIA NTStatus = 0x8000001E + STATUS_BEGINNING_OF_MEDIA NTStatus = 0x8000001F + STATUS_MEDIA_CHECK NTStatus = 0x80000020 + STATUS_SETMARK_DETECTED NTStatus = 0x80000021 + STATUS_NO_DATA_DETECTED NTStatus = 0x80000022 + STATUS_REDIRECTOR_HAS_OPEN_HANDLES NTStatus = 0x80000023 + STATUS_SERVER_HAS_OPEN_HANDLES NTStatus = 0x80000024 + STATUS_ALREADY_DISCONNECTED NTStatus = 0x80000025 + STATUS_LONGJUMP NTStatus = 0x80000026 + STATUS_CLEANER_CARTRIDGE_INSTALLED NTStatus = 0x80000027 + STATUS_PLUGPLAY_QUERY_VETOED NTStatus = 0x80000028 + STATUS_UNWIND_CONSOLIDATE NTStatus = 0x80000029 + STATUS_REGISTRY_HIVE_RECOVERED NTStatus = 0x8000002A + STATUS_DLL_MIGHT_BE_INSECURE NTStatus = 0x8000002B + STATUS_DLL_MIGHT_BE_INCOMPATIBLE NTStatus = 0x8000002C + STATUS_STOPPED_ON_SYMLINK NTStatus = 0x8000002D + STATUS_CANNOT_GRANT_REQUESTED_OPLOCK NTStatus = 0x8000002E + STATUS_NO_ACE_CONDITION NTStatus = 0x8000002F + STATUS_DEVICE_SUPPORT_IN_PROGRESS NTStatus = 0x80000030 + STATUS_DEVICE_POWER_CYCLE_REQUIRED NTStatus = 0x80000031 + STATUS_NO_WORK_DONE NTStatus = 0x80000032 + STATUS_CLUSTER_NODE_ALREADY_UP NTStatus = 0x80130001 + STATUS_CLUSTER_NODE_ALREADY_DOWN NTStatus = 0x80130002 + STATUS_CLUSTER_NETWORK_ALREADY_ONLINE NTStatus = 0x80130003 + STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE NTStatus = 0x80130004 + STATUS_CLUSTER_NODE_ALREADY_MEMBER NTStatus = 0x80130005 + STATUS_FLT_BUFFER_TOO_SMALL NTStatus = 0x801C0001 + STATUS_FVE_PARTIAL_METADATA NTStatus = 0x80210001 + STATUS_FVE_TRANSIENT_STATE NTStatus = 0x80210002 + STATUS_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH NTStatus = 0x8000CF00 + STATUS_UNSUCCESSFUL NTStatus = 0xC0000001 + STATUS_NOT_IMPLEMENTED NTStatus = 0xC0000002 + STATUS_INVALID_INFO_CLASS NTStatus = 0xC0000003 + STATUS_INFO_LENGTH_MISMATCH NTStatus = 0xC0000004 + STATUS_ACCESS_VIOLATION NTStatus = 0xC0000005 + STATUS_IN_PAGE_ERROR NTStatus = 0xC0000006 + STATUS_PAGEFILE_QUOTA NTStatus = 0xC0000007 + STATUS_INVALID_HANDLE NTStatus = 0xC0000008 + STATUS_BAD_INITIAL_STACK NTStatus = 0xC0000009 + STATUS_BAD_INITIAL_PC NTStatus = 0xC000000A + STATUS_INVALID_CID NTStatus = 0xC000000B + STATUS_TIMER_NOT_CANCELED NTStatus = 0xC000000C + STATUS_INVALID_PARAMETER NTStatus = 0xC000000D + STATUS_NO_SUCH_DEVICE NTStatus = 0xC000000E + STATUS_NO_SUCH_FILE NTStatus = 0xC000000F + STATUS_INVALID_DEVICE_REQUEST NTStatus = 0xC0000010 + STATUS_END_OF_FILE NTStatus = 0xC0000011 + STATUS_WRONG_VOLUME NTStatus = 0xC0000012 + 
STATUS_NO_MEDIA_IN_DEVICE NTStatus = 0xC0000013 + STATUS_UNRECOGNIZED_MEDIA NTStatus = 0xC0000014 + STATUS_NONEXISTENT_SECTOR NTStatus = 0xC0000015 + STATUS_MORE_PROCESSING_REQUIRED NTStatus = 0xC0000016 + STATUS_NO_MEMORY NTStatus = 0xC0000017 + STATUS_CONFLICTING_ADDRESSES NTStatus = 0xC0000018 + STATUS_NOT_MAPPED_VIEW NTStatus = 0xC0000019 + STATUS_UNABLE_TO_FREE_VM NTStatus = 0xC000001A + STATUS_UNABLE_TO_DELETE_SECTION NTStatus = 0xC000001B + STATUS_INVALID_SYSTEM_SERVICE NTStatus = 0xC000001C + STATUS_ILLEGAL_INSTRUCTION NTStatus = 0xC000001D + STATUS_INVALID_LOCK_SEQUENCE NTStatus = 0xC000001E + STATUS_INVALID_VIEW_SIZE NTStatus = 0xC000001F + STATUS_INVALID_FILE_FOR_SECTION NTStatus = 0xC0000020 + STATUS_ALREADY_COMMITTED NTStatus = 0xC0000021 + STATUS_ACCESS_DENIED NTStatus = 0xC0000022 + STATUS_BUFFER_TOO_SMALL NTStatus = 0xC0000023 + STATUS_OBJECT_TYPE_MISMATCH NTStatus = 0xC0000024 + STATUS_NONCONTINUABLE_EXCEPTION NTStatus = 0xC0000025 + STATUS_INVALID_DISPOSITION NTStatus = 0xC0000026 + STATUS_UNWIND NTStatus = 0xC0000027 + STATUS_BAD_STACK NTStatus = 0xC0000028 + STATUS_INVALID_UNWIND_TARGET NTStatus = 0xC0000029 + STATUS_NOT_LOCKED NTStatus = 0xC000002A + STATUS_PARITY_ERROR NTStatus = 0xC000002B + STATUS_UNABLE_TO_DECOMMIT_VM NTStatus = 0xC000002C + STATUS_NOT_COMMITTED NTStatus = 0xC000002D + STATUS_INVALID_PORT_ATTRIBUTES NTStatus = 0xC000002E + STATUS_PORT_MESSAGE_TOO_LONG NTStatus = 0xC000002F + STATUS_INVALID_PARAMETER_MIX NTStatus = 0xC0000030 + STATUS_INVALID_QUOTA_LOWER NTStatus = 0xC0000031 + STATUS_DISK_CORRUPT_ERROR NTStatus = 0xC0000032 + STATUS_OBJECT_NAME_INVALID NTStatus = 0xC0000033 + STATUS_OBJECT_NAME_NOT_FOUND NTStatus = 0xC0000034 + STATUS_OBJECT_NAME_COLLISION NTStatus = 0xC0000035 + STATUS_PORT_DO_NOT_DISTURB NTStatus = 0xC0000036 + STATUS_PORT_DISCONNECTED NTStatus = 0xC0000037 + STATUS_DEVICE_ALREADY_ATTACHED NTStatus = 0xC0000038 + STATUS_OBJECT_PATH_INVALID NTStatus = 0xC0000039 + STATUS_OBJECT_PATH_NOT_FOUND NTStatus = 0xC000003A + STATUS_OBJECT_PATH_SYNTAX_BAD NTStatus = 0xC000003B + STATUS_DATA_OVERRUN NTStatus = 0xC000003C + STATUS_DATA_LATE_ERROR NTStatus = 0xC000003D + STATUS_DATA_ERROR NTStatus = 0xC000003E + STATUS_CRC_ERROR NTStatus = 0xC000003F + STATUS_SECTION_TOO_BIG NTStatus = 0xC0000040 + STATUS_PORT_CONNECTION_REFUSED NTStatus = 0xC0000041 + STATUS_INVALID_PORT_HANDLE NTStatus = 0xC0000042 + STATUS_SHARING_VIOLATION NTStatus = 0xC0000043 + STATUS_QUOTA_EXCEEDED NTStatus = 0xC0000044 + STATUS_INVALID_PAGE_PROTECTION NTStatus = 0xC0000045 + STATUS_MUTANT_NOT_OWNED NTStatus = 0xC0000046 + STATUS_SEMAPHORE_LIMIT_EXCEEDED NTStatus = 0xC0000047 + STATUS_PORT_ALREADY_SET NTStatus = 0xC0000048 + STATUS_SECTION_NOT_IMAGE NTStatus = 0xC0000049 + STATUS_SUSPEND_COUNT_EXCEEDED NTStatus = 0xC000004A + STATUS_THREAD_IS_TERMINATING NTStatus = 0xC000004B + STATUS_BAD_WORKING_SET_LIMIT NTStatus = 0xC000004C + STATUS_INCOMPATIBLE_FILE_MAP NTStatus = 0xC000004D + STATUS_SECTION_PROTECTION NTStatus = 0xC000004E + STATUS_EAS_NOT_SUPPORTED NTStatus = 0xC000004F + STATUS_EA_TOO_LARGE NTStatus = 0xC0000050 + STATUS_NONEXISTENT_EA_ENTRY NTStatus = 0xC0000051 + STATUS_NO_EAS_ON_FILE NTStatus = 0xC0000052 + STATUS_EA_CORRUPT_ERROR NTStatus = 0xC0000053 + STATUS_FILE_LOCK_CONFLICT NTStatus = 0xC0000054 + STATUS_LOCK_NOT_GRANTED NTStatus = 0xC0000055 + STATUS_DELETE_PENDING NTStatus = 0xC0000056 + STATUS_CTL_FILE_NOT_SUPPORTED NTStatus = 0xC0000057 + STATUS_UNKNOWN_REVISION NTStatus = 0xC0000058 + STATUS_REVISION_MISMATCH NTStatus = 0xC0000059 + 
STATUS_INVALID_OWNER NTStatus = 0xC000005A + STATUS_INVALID_PRIMARY_GROUP NTStatus = 0xC000005B + STATUS_NO_IMPERSONATION_TOKEN NTStatus = 0xC000005C + STATUS_CANT_DISABLE_MANDATORY NTStatus = 0xC000005D + STATUS_NO_LOGON_SERVERS NTStatus = 0xC000005E + STATUS_NO_SUCH_LOGON_SESSION NTStatus = 0xC000005F + STATUS_NO_SUCH_PRIVILEGE NTStatus = 0xC0000060 + STATUS_PRIVILEGE_NOT_HELD NTStatus = 0xC0000061 + STATUS_INVALID_ACCOUNT_NAME NTStatus = 0xC0000062 + STATUS_USER_EXISTS NTStatus = 0xC0000063 + STATUS_NO_SUCH_USER NTStatus = 0xC0000064 + STATUS_GROUP_EXISTS NTStatus = 0xC0000065 + STATUS_NO_SUCH_GROUP NTStatus = 0xC0000066 + STATUS_MEMBER_IN_GROUP NTStatus = 0xC0000067 + STATUS_MEMBER_NOT_IN_GROUP NTStatus = 0xC0000068 + STATUS_LAST_ADMIN NTStatus = 0xC0000069 + STATUS_WRONG_PASSWORD NTStatus = 0xC000006A + STATUS_ILL_FORMED_PASSWORD NTStatus = 0xC000006B + STATUS_PASSWORD_RESTRICTION NTStatus = 0xC000006C + STATUS_LOGON_FAILURE NTStatus = 0xC000006D + STATUS_ACCOUNT_RESTRICTION NTStatus = 0xC000006E + STATUS_INVALID_LOGON_HOURS NTStatus = 0xC000006F + STATUS_INVALID_WORKSTATION NTStatus = 0xC0000070 + STATUS_PASSWORD_EXPIRED NTStatus = 0xC0000071 + STATUS_ACCOUNT_DISABLED NTStatus = 0xC0000072 + STATUS_NONE_MAPPED NTStatus = 0xC0000073 + STATUS_TOO_MANY_LUIDS_REQUESTED NTStatus = 0xC0000074 + STATUS_LUIDS_EXHAUSTED NTStatus = 0xC0000075 + STATUS_INVALID_SUB_AUTHORITY NTStatus = 0xC0000076 + STATUS_INVALID_ACL NTStatus = 0xC0000077 + STATUS_INVALID_SID NTStatus = 0xC0000078 + STATUS_INVALID_SECURITY_DESCR NTStatus = 0xC0000079 + STATUS_PROCEDURE_NOT_FOUND NTStatus = 0xC000007A + STATUS_INVALID_IMAGE_FORMAT NTStatus = 0xC000007B + STATUS_NO_TOKEN NTStatus = 0xC000007C + STATUS_BAD_INHERITANCE_ACL NTStatus = 0xC000007D + STATUS_RANGE_NOT_LOCKED NTStatus = 0xC000007E + STATUS_DISK_FULL NTStatus = 0xC000007F + STATUS_SERVER_DISABLED NTStatus = 0xC0000080 + STATUS_SERVER_NOT_DISABLED NTStatus = 0xC0000081 + STATUS_TOO_MANY_GUIDS_REQUESTED NTStatus = 0xC0000082 + STATUS_GUIDS_EXHAUSTED NTStatus = 0xC0000083 + STATUS_INVALID_ID_AUTHORITY NTStatus = 0xC0000084 + STATUS_AGENTS_EXHAUSTED NTStatus = 0xC0000085 + STATUS_INVALID_VOLUME_LABEL NTStatus = 0xC0000086 + STATUS_SECTION_NOT_EXTENDED NTStatus = 0xC0000087 + STATUS_NOT_MAPPED_DATA NTStatus = 0xC0000088 + STATUS_RESOURCE_DATA_NOT_FOUND NTStatus = 0xC0000089 + STATUS_RESOURCE_TYPE_NOT_FOUND NTStatus = 0xC000008A + STATUS_RESOURCE_NAME_NOT_FOUND NTStatus = 0xC000008B + STATUS_ARRAY_BOUNDS_EXCEEDED NTStatus = 0xC000008C + STATUS_FLOAT_DENORMAL_OPERAND NTStatus = 0xC000008D + STATUS_FLOAT_DIVIDE_BY_ZERO NTStatus = 0xC000008E + STATUS_FLOAT_INEXACT_RESULT NTStatus = 0xC000008F + STATUS_FLOAT_INVALID_OPERATION NTStatus = 0xC0000090 + STATUS_FLOAT_OVERFLOW NTStatus = 0xC0000091 + STATUS_FLOAT_STACK_CHECK NTStatus = 0xC0000092 + STATUS_FLOAT_UNDERFLOW NTStatus = 0xC0000093 + STATUS_INTEGER_DIVIDE_BY_ZERO NTStatus = 0xC0000094 + STATUS_INTEGER_OVERFLOW NTStatus = 0xC0000095 + STATUS_PRIVILEGED_INSTRUCTION NTStatus = 0xC0000096 + STATUS_TOO_MANY_PAGING_FILES NTStatus = 0xC0000097 + STATUS_FILE_INVALID NTStatus = 0xC0000098 + STATUS_ALLOTTED_SPACE_EXCEEDED NTStatus = 0xC0000099 + STATUS_INSUFFICIENT_RESOURCES NTStatus = 0xC000009A + STATUS_DFS_EXIT_PATH_FOUND NTStatus = 0xC000009B + STATUS_DEVICE_DATA_ERROR NTStatus = 0xC000009C + STATUS_DEVICE_NOT_CONNECTED NTStatus = 0xC000009D + STATUS_DEVICE_POWER_FAILURE NTStatus = 0xC000009E + STATUS_FREE_VM_NOT_AT_BASE NTStatus = 0xC000009F + STATUS_MEMORY_NOT_ALLOCATED NTStatus = 0xC00000A0 + 
STATUS_WORKING_SET_QUOTA NTStatus = 0xC00000A1 + STATUS_MEDIA_WRITE_PROTECTED NTStatus = 0xC00000A2 + STATUS_DEVICE_NOT_READY NTStatus = 0xC00000A3 + STATUS_INVALID_GROUP_ATTRIBUTES NTStatus = 0xC00000A4 + STATUS_BAD_IMPERSONATION_LEVEL NTStatus = 0xC00000A5 + STATUS_CANT_OPEN_ANONYMOUS NTStatus = 0xC00000A6 + STATUS_BAD_VALIDATION_CLASS NTStatus = 0xC00000A7 + STATUS_BAD_TOKEN_TYPE NTStatus = 0xC00000A8 + STATUS_BAD_MASTER_BOOT_RECORD NTStatus = 0xC00000A9 + STATUS_INSTRUCTION_MISALIGNMENT NTStatus = 0xC00000AA + STATUS_INSTANCE_NOT_AVAILABLE NTStatus = 0xC00000AB + STATUS_PIPE_NOT_AVAILABLE NTStatus = 0xC00000AC + STATUS_INVALID_PIPE_STATE NTStatus = 0xC00000AD + STATUS_PIPE_BUSY NTStatus = 0xC00000AE + STATUS_ILLEGAL_FUNCTION NTStatus = 0xC00000AF + STATUS_PIPE_DISCONNECTED NTStatus = 0xC00000B0 + STATUS_PIPE_CLOSING NTStatus = 0xC00000B1 + STATUS_PIPE_CONNECTED NTStatus = 0xC00000B2 + STATUS_PIPE_LISTENING NTStatus = 0xC00000B3 + STATUS_INVALID_READ_MODE NTStatus = 0xC00000B4 + STATUS_IO_TIMEOUT NTStatus = 0xC00000B5 + STATUS_FILE_FORCED_CLOSED NTStatus = 0xC00000B6 + STATUS_PROFILING_NOT_STARTED NTStatus = 0xC00000B7 + STATUS_PROFILING_NOT_STOPPED NTStatus = 0xC00000B8 + STATUS_COULD_NOT_INTERPRET NTStatus = 0xC00000B9 + STATUS_FILE_IS_A_DIRECTORY NTStatus = 0xC00000BA + STATUS_NOT_SUPPORTED NTStatus = 0xC00000BB + STATUS_REMOTE_NOT_LISTENING NTStatus = 0xC00000BC + STATUS_DUPLICATE_NAME NTStatus = 0xC00000BD + STATUS_BAD_NETWORK_PATH NTStatus = 0xC00000BE + STATUS_NETWORK_BUSY NTStatus = 0xC00000BF + STATUS_DEVICE_DOES_NOT_EXIST NTStatus = 0xC00000C0 + STATUS_TOO_MANY_COMMANDS NTStatus = 0xC00000C1 + STATUS_ADAPTER_HARDWARE_ERROR NTStatus = 0xC00000C2 + STATUS_INVALID_NETWORK_RESPONSE NTStatus = 0xC00000C3 + STATUS_UNEXPECTED_NETWORK_ERROR NTStatus = 0xC00000C4 + STATUS_BAD_REMOTE_ADAPTER NTStatus = 0xC00000C5 + STATUS_PRINT_QUEUE_FULL NTStatus = 0xC00000C6 + STATUS_NO_SPOOL_SPACE NTStatus = 0xC00000C7 + STATUS_PRINT_CANCELLED NTStatus = 0xC00000C8 + STATUS_NETWORK_NAME_DELETED NTStatus = 0xC00000C9 + STATUS_NETWORK_ACCESS_DENIED NTStatus = 0xC00000CA + STATUS_BAD_DEVICE_TYPE NTStatus = 0xC00000CB + STATUS_BAD_NETWORK_NAME NTStatus = 0xC00000CC + STATUS_TOO_MANY_NAMES NTStatus = 0xC00000CD + STATUS_TOO_MANY_SESSIONS NTStatus = 0xC00000CE + STATUS_SHARING_PAUSED NTStatus = 0xC00000CF + STATUS_REQUEST_NOT_ACCEPTED NTStatus = 0xC00000D0 + STATUS_REDIRECTOR_PAUSED NTStatus = 0xC00000D1 + STATUS_NET_WRITE_FAULT NTStatus = 0xC00000D2 + STATUS_PROFILING_AT_LIMIT NTStatus = 0xC00000D3 + STATUS_NOT_SAME_DEVICE NTStatus = 0xC00000D4 + STATUS_FILE_RENAMED NTStatus = 0xC00000D5 + STATUS_VIRTUAL_CIRCUIT_CLOSED NTStatus = 0xC00000D6 + STATUS_NO_SECURITY_ON_OBJECT NTStatus = 0xC00000D7 + STATUS_CANT_WAIT NTStatus = 0xC00000D8 + STATUS_PIPE_EMPTY NTStatus = 0xC00000D9 + STATUS_CANT_ACCESS_DOMAIN_INFO NTStatus = 0xC00000DA + STATUS_CANT_TERMINATE_SELF NTStatus = 0xC00000DB + STATUS_INVALID_SERVER_STATE NTStatus = 0xC00000DC + STATUS_INVALID_DOMAIN_STATE NTStatus = 0xC00000DD + STATUS_INVALID_DOMAIN_ROLE NTStatus = 0xC00000DE + STATUS_NO_SUCH_DOMAIN NTStatus = 0xC00000DF + STATUS_DOMAIN_EXISTS NTStatus = 0xC00000E0 + STATUS_DOMAIN_LIMIT_EXCEEDED NTStatus = 0xC00000E1 + STATUS_OPLOCK_NOT_GRANTED NTStatus = 0xC00000E2 + STATUS_INVALID_OPLOCK_PROTOCOL NTStatus = 0xC00000E3 + STATUS_INTERNAL_DB_CORRUPTION NTStatus = 0xC00000E4 + STATUS_INTERNAL_ERROR NTStatus = 0xC00000E5 + STATUS_GENERIC_NOT_MAPPED NTStatus = 0xC00000E6 + STATUS_BAD_DESCRIPTOR_FORMAT NTStatus = 0xC00000E7 + STATUS_INVALID_USER_BUFFER 
NTStatus = 0xC00000E8 + STATUS_UNEXPECTED_IO_ERROR NTStatus = 0xC00000E9 + STATUS_UNEXPECTED_MM_CREATE_ERR NTStatus = 0xC00000EA + STATUS_UNEXPECTED_MM_MAP_ERROR NTStatus = 0xC00000EB + STATUS_UNEXPECTED_MM_EXTEND_ERR NTStatus = 0xC00000EC + STATUS_NOT_LOGON_PROCESS NTStatus = 0xC00000ED + STATUS_LOGON_SESSION_EXISTS NTStatus = 0xC00000EE + STATUS_INVALID_PARAMETER_1 NTStatus = 0xC00000EF + STATUS_INVALID_PARAMETER_2 NTStatus = 0xC00000F0 + STATUS_INVALID_PARAMETER_3 NTStatus = 0xC00000F1 + STATUS_INVALID_PARAMETER_4 NTStatus = 0xC00000F2 + STATUS_INVALID_PARAMETER_5 NTStatus = 0xC00000F3 + STATUS_INVALID_PARAMETER_6 NTStatus = 0xC00000F4 + STATUS_INVALID_PARAMETER_7 NTStatus = 0xC00000F5 + STATUS_INVALID_PARAMETER_8 NTStatus = 0xC00000F6 + STATUS_INVALID_PARAMETER_9 NTStatus = 0xC00000F7 + STATUS_INVALID_PARAMETER_10 NTStatus = 0xC00000F8 + STATUS_INVALID_PARAMETER_11 NTStatus = 0xC00000F9 + STATUS_INVALID_PARAMETER_12 NTStatus = 0xC00000FA + STATUS_REDIRECTOR_NOT_STARTED NTStatus = 0xC00000FB + STATUS_REDIRECTOR_STARTED NTStatus = 0xC00000FC + STATUS_STACK_OVERFLOW NTStatus = 0xC00000FD + STATUS_NO_SUCH_PACKAGE NTStatus = 0xC00000FE + STATUS_BAD_FUNCTION_TABLE NTStatus = 0xC00000FF + STATUS_VARIABLE_NOT_FOUND NTStatus = 0xC0000100 + STATUS_DIRECTORY_NOT_EMPTY NTStatus = 0xC0000101 + STATUS_FILE_CORRUPT_ERROR NTStatus = 0xC0000102 + STATUS_NOT_A_DIRECTORY NTStatus = 0xC0000103 + STATUS_BAD_LOGON_SESSION_STATE NTStatus = 0xC0000104 + STATUS_LOGON_SESSION_COLLISION NTStatus = 0xC0000105 + STATUS_NAME_TOO_LONG NTStatus = 0xC0000106 + STATUS_FILES_OPEN NTStatus = 0xC0000107 + STATUS_CONNECTION_IN_USE NTStatus = 0xC0000108 + STATUS_MESSAGE_NOT_FOUND NTStatus = 0xC0000109 + STATUS_PROCESS_IS_TERMINATING NTStatus = 0xC000010A + STATUS_INVALID_LOGON_TYPE NTStatus = 0xC000010B + STATUS_NO_GUID_TRANSLATION NTStatus = 0xC000010C + STATUS_CANNOT_IMPERSONATE NTStatus = 0xC000010D + STATUS_IMAGE_ALREADY_LOADED NTStatus = 0xC000010E + STATUS_ABIOS_NOT_PRESENT NTStatus = 0xC000010F + STATUS_ABIOS_LID_NOT_EXIST NTStatus = 0xC0000110 + STATUS_ABIOS_LID_ALREADY_OWNED NTStatus = 0xC0000111 + STATUS_ABIOS_NOT_LID_OWNER NTStatus = 0xC0000112 + STATUS_ABIOS_INVALID_COMMAND NTStatus = 0xC0000113 + STATUS_ABIOS_INVALID_LID NTStatus = 0xC0000114 + STATUS_ABIOS_SELECTOR_NOT_AVAILABLE NTStatus = 0xC0000115 + STATUS_ABIOS_INVALID_SELECTOR NTStatus = 0xC0000116 + STATUS_NO_LDT NTStatus = 0xC0000117 + STATUS_INVALID_LDT_SIZE NTStatus = 0xC0000118 + STATUS_INVALID_LDT_OFFSET NTStatus = 0xC0000119 + STATUS_INVALID_LDT_DESCRIPTOR NTStatus = 0xC000011A + STATUS_INVALID_IMAGE_NE_FORMAT NTStatus = 0xC000011B + STATUS_RXACT_INVALID_STATE NTStatus = 0xC000011C + STATUS_RXACT_COMMIT_FAILURE NTStatus = 0xC000011D + STATUS_MAPPED_FILE_SIZE_ZERO NTStatus = 0xC000011E + STATUS_TOO_MANY_OPENED_FILES NTStatus = 0xC000011F + STATUS_CANCELLED NTStatus = 0xC0000120 + STATUS_CANNOT_DELETE NTStatus = 0xC0000121 + STATUS_INVALID_COMPUTER_NAME NTStatus = 0xC0000122 + STATUS_FILE_DELETED NTStatus = 0xC0000123 + STATUS_SPECIAL_ACCOUNT NTStatus = 0xC0000124 + STATUS_SPECIAL_GROUP NTStatus = 0xC0000125 + STATUS_SPECIAL_USER NTStatus = 0xC0000126 + STATUS_MEMBERS_PRIMARY_GROUP NTStatus = 0xC0000127 + STATUS_FILE_CLOSED NTStatus = 0xC0000128 + STATUS_TOO_MANY_THREADS NTStatus = 0xC0000129 + STATUS_THREAD_NOT_IN_PROCESS NTStatus = 0xC000012A + STATUS_TOKEN_ALREADY_IN_USE NTStatus = 0xC000012B + STATUS_PAGEFILE_QUOTA_EXCEEDED NTStatus = 0xC000012C + STATUS_COMMITMENT_LIMIT NTStatus = 0xC000012D + STATUS_INVALID_IMAGE_LE_FORMAT NTStatus = 0xC000012E 
+ STATUS_INVALID_IMAGE_NOT_MZ NTStatus = 0xC000012F + STATUS_INVALID_IMAGE_PROTECT NTStatus = 0xC0000130 + STATUS_INVALID_IMAGE_WIN_16 NTStatus = 0xC0000131 + STATUS_LOGON_SERVER_CONFLICT NTStatus = 0xC0000132 + STATUS_TIME_DIFFERENCE_AT_DC NTStatus = 0xC0000133 + STATUS_SYNCHRONIZATION_REQUIRED NTStatus = 0xC0000134 + STATUS_DLL_NOT_FOUND NTStatus = 0xC0000135 + STATUS_OPEN_FAILED NTStatus = 0xC0000136 + STATUS_IO_PRIVILEGE_FAILED NTStatus = 0xC0000137 + STATUS_ORDINAL_NOT_FOUND NTStatus = 0xC0000138 + STATUS_ENTRYPOINT_NOT_FOUND NTStatus = 0xC0000139 + STATUS_CONTROL_C_EXIT NTStatus = 0xC000013A + STATUS_LOCAL_DISCONNECT NTStatus = 0xC000013B + STATUS_REMOTE_DISCONNECT NTStatus = 0xC000013C + STATUS_REMOTE_RESOURCES NTStatus = 0xC000013D + STATUS_LINK_FAILED NTStatus = 0xC000013E + STATUS_LINK_TIMEOUT NTStatus = 0xC000013F + STATUS_INVALID_CONNECTION NTStatus = 0xC0000140 + STATUS_INVALID_ADDRESS NTStatus = 0xC0000141 + STATUS_DLL_INIT_FAILED NTStatus = 0xC0000142 + STATUS_MISSING_SYSTEMFILE NTStatus = 0xC0000143 + STATUS_UNHANDLED_EXCEPTION NTStatus = 0xC0000144 + STATUS_APP_INIT_FAILURE NTStatus = 0xC0000145 + STATUS_PAGEFILE_CREATE_FAILED NTStatus = 0xC0000146 + STATUS_NO_PAGEFILE NTStatus = 0xC0000147 + STATUS_INVALID_LEVEL NTStatus = 0xC0000148 + STATUS_WRONG_PASSWORD_CORE NTStatus = 0xC0000149 + STATUS_ILLEGAL_FLOAT_CONTEXT NTStatus = 0xC000014A + STATUS_PIPE_BROKEN NTStatus = 0xC000014B + STATUS_REGISTRY_CORRUPT NTStatus = 0xC000014C + STATUS_REGISTRY_IO_FAILED NTStatus = 0xC000014D + STATUS_NO_EVENT_PAIR NTStatus = 0xC000014E + STATUS_UNRECOGNIZED_VOLUME NTStatus = 0xC000014F + STATUS_SERIAL_NO_DEVICE_INITED NTStatus = 0xC0000150 + STATUS_NO_SUCH_ALIAS NTStatus = 0xC0000151 + STATUS_MEMBER_NOT_IN_ALIAS NTStatus = 0xC0000152 + STATUS_MEMBER_IN_ALIAS NTStatus = 0xC0000153 + STATUS_ALIAS_EXISTS NTStatus = 0xC0000154 + STATUS_LOGON_NOT_GRANTED NTStatus = 0xC0000155 + STATUS_TOO_MANY_SECRETS NTStatus = 0xC0000156 + STATUS_SECRET_TOO_LONG NTStatus = 0xC0000157 + STATUS_INTERNAL_DB_ERROR NTStatus = 0xC0000158 + STATUS_FULLSCREEN_MODE NTStatus = 0xC0000159 + STATUS_TOO_MANY_CONTEXT_IDS NTStatus = 0xC000015A + STATUS_LOGON_TYPE_NOT_GRANTED NTStatus = 0xC000015B + STATUS_NOT_REGISTRY_FILE NTStatus = 0xC000015C + STATUS_NT_CROSS_ENCRYPTION_REQUIRED NTStatus = 0xC000015D + STATUS_DOMAIN_CTRLR_CONFIG_ERROR NTStatus = 0xC000015E + STATUS_FT_MISSING_MEMBER NTStatus = 0xC000015F + STATUS_ILL_FORMED_SERVICE_ENTRY NTStatus = 0xC0000160 + STATUS_ILLEGAL_CHARACTER NTStatus = 0xC0000161 + STATUS_UNMAPPABLE_CHARACTER NTStatus = 0xC0000162 + STATUS_UNDEFINED_CHARACTER NTStatus = 0xC0000163 + STATUS_FLOPPY_VOLUME NTStatus = 0xC0000164 + STATUS_FLOPPY_ID_MARK_NOT_FOUND NTStatus = 0xC0000165 + STATUS_FLOPPY_WRONG_CYLINDER NTStatus = 0xC0000166 + STATUS_FLOPPY_UNKNOWN_ERROR NTStatus = 0xC0000167 + STATUS_FLOPPY_BAD_REGISTERS NTStatus = 0xC0000168 + STATUS_DISK_RECALIBRATE_FAILED NTStatus = 0xC0000169 + STATUS_DISK_OPERATION_FAILED NTStatus = 0xC000016A + STATUS_DISK_RESET_FAILED NTStatus = 0xC000016B + STATUS_SHARED_IRQ_BUSY NTStatus = 0xC000016C + STATUS_FT_ORPHANING NTStatus = 0xC000016D + STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT NTStatus = 0xC000016E + STATUS_PARTITION_FAILURE NTStatus = 0xC0000172 + STATUS_INVALID_BLOCK_LENGTH NTStatus = 0xC0000173 + STATUS_DEVICE_NOT_PARTITIONED NTStatus = 0xC0000174 + STATUS_UNABLE_TO_LOCK_MEDIA NTStatus = 0xC0000175 + STATUS_UNABLE_TO_UNLOAD_MEDIA NTStatus = 0xC0000176 + STATUS_EOM_OVERFLOW NTStatus = 0xC0000177 + STATUS_NO_MEDIA NTStatus = 0xC0000178 + 
STATUS_NO_SUCH_MEMBER NTStatus = 0xC000017A + STATUS_INVALID_MEMBER NTStatus = 0xC000017B + STATUS_KEY_DELETED NTStatus = 0xC000017C + STATUS_NO_LOG_SPACE NTStatus = 0xC000017D + STATUS_TOO_MANY_SIDS NTStatus = 0xC000017E + STATUS_LM_CROSS_ENCRYPTION_REQUIRED NTStatus = 0xC000017F + STATUS_KEY_HAS_CHILDREN NTStatus = 0xC0000180 + STATUS_CHILD_MUST_BE_VOLATILE NTStatus = 0xC0000181 + STATUS_DEVICE_CONFIGURATION_ERROR NTStatus = 0xC0000182 + STATUS_DRIVER_INTERNAL_ERROR NTStatus = 0xC0000183 + STATUS_INVALID_DEVICE_STATE NTStatus = 0xC0000184 + STATUS_IO_DEVICE_ERROR NTStatus = 0xC0000185 + STATUS_DEVICE_PROTOCOL_ERROR NTStatus = 0xC0000186 + STATUS_BACKUP_CONTROLLER NTStatus = 0xC0000187 + STATUS_LOG_FILE_FULL NTStatus = 0xC0000188 + STATUS_TOO_LATE NTStatus = 0xC0000189 + STATUS_NO_TRUST_LSA_SECRET NTStatus = 0xC000018A + STATUS_NO_TRUST_SAM_ACCOUNT NTStatus = 0xC000018B + STATUS_TRUSTED_DOMAIN_FAILURE NTStatus = 0xC000018C + STATUS_TRUSTED_RELATIONSHIP_FAILURE NTStatus = 0xC000018D + STATUS_EVENTLOG_FILE_CORRUPT NTStatus = 0xC000018E + STATUS_EVENTLOG_CANT_START NTStatus = 0xC000018F + STATUS_TRUST_FAILURE NTStatus = 0xC0000190 + STATUS_MUTANT_LIMIT_EXCEEDED NTStatus = 0xC0000191 + STATUS_NETLOGON_NOT_STARTED NTStatus = 0xC0000192 + STATUS_ACCOUNT_EXPIRED NTStatus = 0xC0000193 + STATUS_POSSIBLE_DEADLOCK NTStatus = 0xC0000194 + STATUS_NETWORK_CREDENTIAL_CONFLICT NTStatus = 0xC0000195 + STATUS_REMOTE_SESSION_LIMIT NTStatus = 0xC0000196 + STATUS_EVENTLOG_FILE_CHANGED NTStatus = 0xC0000197 + STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT NTStatus = 0xC0000198 + STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT NTStatus = 0xC0000199 + STATUS_NOLOGON_SERVER_TRUST_ACCOUNT NTStatus = 0xC000019A + STATUS_DOMAIN_TRUST_INCONSISTENT NTStatus = 0xC000019B + STATUS_FS_DRIVER_REQUIRED NTStatus = 0xC000019C + STATUS_IMAGE_ALREADY_LOADED_AS_DLL NTStatus = 0xC000019D + STATUS_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING NTStatus = 0xC000019E + STATUS_SHORT_NAMES_NOT_ENABLED_ON_VOLUME NTStatus = 0xC000019F + STATUS_SECURITY_STREAM_IS_INCONSISTENT NTStatus = 0xC00001A0 + STATUS_INVALID_LOCK_RANGE NTStatus = 0xC00001A1 + STATUS_INVALID_ACE_CONDITION NTStatus = 0xC00001A2 + STATUS_IMAGE_SUBSYSTEM_NOT_PRESENT NTStatus = 0xC00001A3 + STATUS_NOTIFICATION_GUID_ALREADY_DEFINED NTStatus = 0xC00001A4 + STATUS_INVALID_EXCEPTION_HANDLER NTStatus = 0xC00001A5 + STATUS_DUPLICATE_PRIVILEGES NTStatus = 0xC00001A6 + STATUS_NOT_ALLOWED_ON_SYSTEM_FILE NTStatus = 0xC00001A7 + STATUS_REPAIR_NEEDED NTStatus = 0xC00001A8 + STATUS_QUOTA_NOT_ENABLED NTStatus = 0xC00001A9 + STATUS_NO_APPLICATION_PACKAGE NTStatus = 0xC00001AA + STATUS_FILE_METADATA_OPTIMIZATION_IN_PROGRESS NTStatus = 0xC00001AB + STATUS_NOT_SAME_OBJECT NTStatus = 0xC00001AC + STATUS_FATAL_MEMORY_EXHAUSTION NTStatus = 0xC00001AD + STATUS_ERROR_PROCESS_NOT_IN_JOB NTStatus = 0xC00001AE + STATUS_CPU_SET_INVALID NTStatus = 0xC00001AF + STATUS_IO_DEVICE_INVALID_DATA NTStatus = 0xC00001B0 + STATUS_IO_UNALIGNED_WRITE NTStatus = 0xC00001B1 + STATUS_NETWORK_OPEN_RESTRICTION NTStatus = 0xC0000201 + STATUS_NO_USER_SESSION_KEY NTStatus = 0xC0000202 + STATUS_USER_SESSION_DELETED NTStatus = 0xC0000203 + STATUS_RESOURCE_LANG_NOT_FOUND NTStatus = 0xC0000204 + STATUS_INSUFF_SERVER_RESOURCES NTStatus = 0xC0000205 + STATUS_INVALID_BUFFER_SIZE NTStatus = 0xC0000206 + STATUS_INVALID_ADDRESS_COMPONENT NTStatus = 0xC0000207 + STATUS_INVALID_ADDRESS_WILDCARD NTStatus = 0xC0000208 + STATUS_TOO_MANY_ADDRESSES NTStatus = 0xC0000209 + STATUS_ADDRESS_ALREADY_EXISTS NTStatus = 0xC000020A + 
STATUS_ADDRESS_CLOSED NTStatus = 0xC000020B + STATUS_CONNECTION_DISCONNECTED NTStatus = 0xC000020C + STATUS_CONNECTION_RESET NTStatus = 0xC000020D + STATUS_TOO_MANY_NODES NTStatus = 0xC000020E + STATUS_TRANSACTION_ABORTED NTStatus = 0xC000020F + STATUS_TRANSACTION_TIMED_OUT NTStatus = 0xC0000210 + STATUS_TRANSACTION_NO_RELEASE NTStatus = 0xC0000211 + STATUS_TRANSACTION_NO_MATCH NTStatus = 0xC0000212 + STATUS_TRANSACTION_RESPONDED NTStatus = 0xC0000213 + STATUS_TRANSACTION_INVALID_ID NTStatus = 0xC0000214 + STATUS_TRANSACTION_INVALID_TYPE NTStatus = 0xC0000215 + STATUS_NOT_SERVER_SESSION NTStatus = 0xC0000216 + STATUS_NOT_CLIENT_SESSION NTStatus = 0xC0000217 + STATUS_CANNOT_LOAD_REGISTRY_FILE NTStatus = 0xC0000218 + STATUS_DEBUG_ATTACH_FAILED NTStatus = 0xC0000219 + STATUS_SYSTEM_PROCESS_TERMINATED NTStatus = 0xC000021A + STATUS_DATA_NOT_ACCEPTED NTStatus = 0xC000021B + STATUS_NO_BROWSER_SERVERS_FOUND NTStatus = 0xC000021C + STATUS_VDM_HARD_ERROR NTStatus = 0xC000021D + STATUS_DRIVER_CANCEL_TIMEOUT NTStatus = 0xC000021E + STATUS_REPLY_MESSAGE_MISMATCH NTStatus = 0xC000021F + STATUS_MAPPED_ALIGNMENT NTStatus = 0xC0000220 + STATUS_IMAGE_CHECKSUM_MISMATCH NTStatus = 0xC0000221 + STATUS_LOST_WRITEBEHIND_DATA NTStatus = 0xC0000222 + STATUS_CLIENT_SERVER_PARAMETERS_INVALID NTStatus = 0xC0000223 + STATUS_PASSWORD_MUST_CHANGE NTStatus = 0xC0000224 + STATUS_NOT_FOUND NTStatus = 0xC0000225 + STATUS_NOT_TINY_STREAM NTStatus = 0xC0000226 + STATUS_RECOVERY_FAILURE NTStatus = 0xC0000227 + STATUS_STACK_OVERFLOW_READ NTStatus = 0xC0000228 + STATUS_FAIL_CHECK NTStatus = 0xC0000229 + STATUS_DUPLICATE_OBJECTID NTStatus = 0xC000022A + STATUS_OBJECTID_EXISTS NTStatus = 0xC000022B + STATUS_CONVERT_TO_LARGE NTStatus = 0xC000022C + STATUS_RETRY NTStatus = 0xC000022D + STATUS_FOUND_OUT_OF_SCOPE NTStatus = 0xC000022E + STATUS_ALLOCATE_BUCKET NTStatus = 0xC000022F + STATUS_PROPSET_NOT_FOUND NTStatus = 0xC0000230 + STATUS_MARSHALL_OVERFLOW NTStatus = 0xC0000231 + STATUS_INVALID_VARIANT NTStatus = 0xC0000232 + STATUS_DOMAIN_CONTROLLER_NOT_FOUND NTStatus = 0xC0000233 + STATUS_ACCOUNT_LOCKED_OUT NTStatus = 0xC0000234 + STATUS_HANDLE_NOT_CLOSABLE NTStatus = 0xC0000235 + STATUS_CONNECTION_REFUSED NTStatus = 0xC0000236 + STATUS_GRACEFUL_DISCONNECT NTStatus = 0xC0000237 + STATUS_ADDRESS_ALREADY_ASSOCIATED NTStatus = 0xC0000238 + STATUS_ADDRESS_NOT_ASSOCIATED NTStatus = 0xC0000239 + STATUS_CONNECTION_INVALID NTStatus = 0xC000023A + STATUS_CONNECTION_ACTIVE NTStatus = 0xC000023B + STATUS_NETWORK_UNREACHABLE NTStatus = 0xC000023C + STATUS_HOST_UNREACHABLE NTStatus = 0xC000023D + STATUS_PROTOCOL_UNREACHABLE NTStatus = 0xC000023E + STATUS_PORT_UNREACHABLE NTStatus = 0xC000023F + STATUS_REQUEST_ABORTED NTStatus = 0xC0000240 + STATUS_CONNECTION_ABORTED NTStatus = 0xC0000241 + STATUS_BAD_COMPRESSION_BUFFER NTStatus = 0xC0000242 + STATUS_USER_MAPPED_FILE NTStatus = 0xC0000243 + STATUS_AUDIT_FAILED NTStatus = 0xC0000244 + STATUS_TIMER_RESOLUTION_NOT_SET NTStatus = 0xC0000245 + STATUS_CONNECTION_COUNT_LIMIT NTStatus = 0xC0000246 + STATUS_LOGIN_TIME_RESTRICTION NTStatus = 0xC0000247 + STATUS_LOGIN_WKSTA_RESTRICTION NTStatus = 0xC0000248 + STATUS_IMAGE_MP_UP_MISMATCH NTStatus = 0xC0000249 + STATUS_INSUFFICIENT_LOGON_INFO NTStatus = 0xC0000250 + STATUS_BAD_DLL_ENTRYPOINT NTStatus = 0xC0000251 + STATUS_BAD_SERVICE_ENTRYPOINT NTStatus = 0xC0000252 + STATUS_LPC_REPLY_LOST NTStatus = 0xC0000253 + STATUS_IP_ADDRESS_CONFLICT1 NTStatus = 0xC0000254 + STATUS_IP_ADDRESS_CONFLICT2 NTStatus = 0xC0000255 + STATUS_REGISTRY_QUOTA_LIMIT NTStatus = 
0xC0000256 + STATUS_PATH_NOT_COVERED NTStatus = 0xC0000257 + STATUS_NO_CALLBACK_ACTIVE NTStatus = 0xC0000258 + STATUS_LICENSE_QUOTA_EXCEEDED NTStatus = 0xC0000259 + STATUS_PWD_TOO_SHORT NTStatus = 0xC000025A + STATUS_PWD_TOO_RECENT NTStatus = 0xC000025B + STATUS_PWD_HISTORY_CONFLICT NTStatus = 0xC000025C + STATUS_PLUGPLAY_NO_DEVICE NTStatus = 0xC000025E + STATUS_UNSUPPORTED_COMPRESSION NTStatus = 0xC000025F + STATUS_INVALID_HW_PROFILE NTStatus = 0xC0000260 + STATUS_INVALID_PLUGPLAY_DEVICE_PATH NTStatus = 0xC0000261 + STATUS_DRIVER_ORDINAL_NOT_FOUND NTStatus = 0xC0000262 + STATUS_DRIVER_ENTRYPOINT_NOT_FOUND NTStatus = 0xC0000263 + STATUS_RESOURCE_NOT_OWNED NTStatus = 0xC0000264 + STATUS_TOO_MANY_LINKS NTStatus = 0xC0000265 + STATUS_QUOTA_LIST_INCONSISTENT NTStatus = 0xC0000266 + STATUS_FILE_IS_OFFLINE NTStatus = 0xC0000267 + STATUS_EVALUATION_EXPIRATION NTStatus = 0xC0000268 + STATUS_ILLEGAL_DLL_RELOCATION NTStatus = 0xC0000269 + STATUS_LICENSE_VIOLATION NTStatus = 0xC000026A + STATUS_DLL_INIT_FAILED_LOGOFF NTStatus = 0xC000026B + STATUS_DRIVER_UNABLE_TO_LOAD NTStatus = 0xC000026C + STATUS_DFS_UNAVAILABLE NTStatus = 0xC000026D + STATUS_VOLUME_DISMOUNTED NTStatus = 0xC000026E + STATUS_WX86_INTERNAL_ERROR NTStatus = 0xC000026F + STATUS_WX86_FLOAT_STACK_CHECK NTStatus = 0xC0000270 + STATUS_VALIDATE_CONTINUE NTStatus = 0xC0000271 + STATUS_NO_MATCH NTStatus = 0xC0000272 + STATUS_NO_MORE_MATCHES NTStatus = 0xC0000273 + STATUS_NOT_A_REPARSE_POINT NTStatus = 0xC0000275 + STATUS_IO_REPARSE_TAG_INVALID NTStatus = 0xC0000276 + STATUS_IO_REPARSE_TAG_MISMATCH NTStatus = 0xC0000277 + STATUS_IO_REPARSE_DATA_INVALID NTStatus = 0xC0000278 + STATUS_IO_REPARSE_TAG_NOT_HANDLED NTStatus = 0xC0000279 + STATUS_PWD_TOO_LONG NTStatus = 0xC000027A + STATUS_STOWED_EXCEPTION NTStatus = 0xC000027B + STATUS_CONTEXT_STOWED_EXCEPTION NTStatus = 0xC000027C + STATUS_REPARSE_POINT_NOT_RESOLVED NTStatus = 0xC0000280 + STATUS_DIRECTORY_IS_A_REPARSE_POINT NTStatus = 0xC0000281 + STATUS_RANGE_LIST_CONFLICT NTStatus = 0xC0000282 + STATUS_SOURCE_ELEMENT_EMPTY NTStatus = 0xC0000283 + STATUS_DESTINATION_ELEMENT_FULL NTStatus = 0xC0000284 + STATUS_ILLEGAL_ELEMENT_ADDRESS NTStatus = 0xC0000285 + STATUS_MAGAZINE_NOT_PRESENT NTStatus = 0xC0000286 + STATUS_REINITIALIZATION_NEEDED NTStatus = 0xC0000287 + STATUS_DEVICE_REQUIRES_CLEANING NTStatus = 0x80000288 + STATUS_DEVICE_DOOR_OPEN NTStatus = 0x80000289 + STATUS_ENCRYPTION_FAILED NTStatus = 0xC000028A + STATUS_DECRYPTION_FAILED NTStatus = 0xC000028B + STATUS_RANGE_NOT_FOUND NTStatus = 0xC000028C + STATUS_NO_RECOVERY_POLICY NTStatus = 0xC000028D + STATUS_NO_EFS NTStatus = 0xC000028E + STATUS_WRONG_EFS NTStatus = 0xC000028F + STATUS_NO_USER_KEYS NTStatus = 0xC0000290 + STATUS_FILE_NOT_ENCRYPTED NTStatus = 0xC0000291 + STATUS_NOT_EXPORT_FORMAT NTStatus = 0xC0000292 + STATUS_FILE_ENCRYPTED NTStatus = 0xC0000293 + STATUS_WAKE_SYSTEM NTStatus = 0x40000294 + STATUS_WMI_GUID_NOT_FOUND NTStatus = 0xC0000295 + STATUS_WMI_INSTANCE_NOT_FOUND NTStatus = 0xC0000296 + STATUS_WMI_ITEMID_NOT_FOUND NTStatus = 0xC0000297 + STATUS_WMI_TRY_AGAIN NTStatus = 0xC0000298 + STATUS_SHARED_POLICY NTStatus = 0xC0000299 + STATUS_POLICY_OBJECT_NOT_FOUND NTStatus = 0xC000029A + STATUS_POLICY_ONLY_IN_DS NTStatus = 0xC000029B + STATUS_VOLUME_NOT_UPGRADED NTStatus = 0xC000029C + STATUS_REMOTE_STORAGE_NOT_ACTIVE NTStatus = 0xC000029D + STATUS_REMOTE_STORAGE_MEDIA_ERROR NTStatus = 0xC000029E + STATUS_NO_TRACKING_SERVICE NTStatus = 0xC000029F + STATUS_SERVER_SID_MISMATCH NTStatus = 0xC00002A0 + 
+ STATUS_DS_NO_ATTRIBUTE_OR_VALUE NTStatus = 0xC00002A1
+ STATUS_DS_INVALID_ATTRIBUTE_SYNTAX NTStatus = 0xC00002A2
+ STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED NTStatus = 0xC00002A3
+ STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS NTStatus = 0xC00002A4
+ STATUS_DS_BUSY NTStatus = 0xC00002A5
+ STATUS_DS_UNAVAILABLE NTStatus = 0xC00002A6
+ STATUS_DS_NO_RIDS_ALLOCATED NTStatus = 0xC00002A7
+ STATUS_DS_NO_MORE_RIDS NTStatus = 0xC00002A8
+ STATUS_DS_INCORRECT_ROLE_OWNER NTStatus = 0xC00002A9
+ STATUS_DS_RIDMGR_INIT_ERROR NTStatus = 0xC00002AA
+ STATUS_DS_OBJ_CLASS_VIOLATION NTStatus = 0xC00002AB
+ STATUS_DS_CANT_ON_NON_LEAF NTStatus = 0xC00002AC
+ STATUS_DS_CANT_ON_RDN NTStatus = 0xC00002AD
+ STATUS_DS_CANT_MOD_OBJ_CLASS NTStatus = 0xC00002AE
+ STATUS_DS_CROSS_DOM_MOVE_FAILED NTStatus = 0xC00002AF
+ STATUS_DS_GC_NOT_AVAILABLE NTStatus = 0xC00002B0
+ STATUS_DIRECTORY_SERVICE_REQUIRED NTStatus = 0xC00002B1
+ STATUS_REPARSE_ATTRIBUTE_CONFLICT NTStatus = 0xC00002B2
+ STATUS_CANT_ENABLE_DENY_ONLY NTStatus = 0xC00002B3
+ STATUS_FLOAT_MULTIPLE_FAULTS NTStatus = 0xC00002B4
+ STATUS_FLOAT_MULTIPLE_TRAPS NTStatus = 0xC00002B5
+ STATUS_DEVICE_REMOVED NTStatus = 0xC00002B6
+ STATUS_JOURNAL_DELETE_IN_PROGRESS NTStatus = 0xC00002B7
+ STATUS_JOURNAL_NOT_ACTIVE NTStatus = 0xC00002B8
+ STATUS_NOINTERFACE NTStatus = 0xC00002B9
+ STATUS_DS_RIDMGR_DISABLED NTStatus = 0xC00002BA
+ STATUS_DS_ADMIN_LIMIT_EXCEEDED NTStatus = 0xC00002C1
+ STATUS_DRIVER_FAILED_SLEEP NTStatus = 0xC00002C2
+ STATUS_MUTUAL_AUTHENTICATION_FAILED NTStatus = 0xC00002C3
+ STATUS_CORRUPT_SYSTEM_FILE NTStatus = 0xC00002C4
+ STATUS_DATATYPE_MISALIGNMENT_ERROR NTStatus = 0xC00002C5
+ STATUS_WMI_READ_ONLY NTStatus = 0xC00002C6
+ STATUS_WMI_SET_FAILURE NTStatus = 0xC00002C7
+ STATUS_COMMITMENT_MINIMUM NTStatus = 0xC00002C8
+ STATUS_REG_NAT_CONSUMPTION NTStatus = 0xC00002C9
+ STATUS_TRANSPORT_FULL NTStatus = 0xC00002CA
+ STATUS_DS_SAM_INIT_FAILURE NTStatus = 0xC00002CB
+ STATUS_ONLY_IF_CONNECTED NTStatus = 0xC00002CC
+ STATUS_DS_SENSITIVE_GROUP_VIOLATION NTStatus = 0xC00002CD
+ STATUS_PNP_RESTART_ENUMERATION NTStatus = 0xC00002CE
+ STATUS_JOURNAL_ENTRY_DELETED NTStatus = 0xC00002CF
+ STATUS_DS_CANT_MOD_PRIMARYGROUPID NTStatus = 0xC00002D0
+ STATUS_SYSTEM_IMAGE_BAD_SIGNATURE NTStatus = 0xC00002D1
+ STATUS_PNP_REBOOT_REQUIRED NTStatus = 0xC00002D2
+ STATUS_POWER_STATE_INVALID NTStatus = 0xC00002D3
+ STATUS_DS_INVALID_GROUP_TYPE NTStatus = 0xC00002D4
+ STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN NTStatus = 0xC00002D5
+ STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN NTStatus = 0xC00002D6
+ STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER NTStatus = 0xC00002D7
+ STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER NTStatus = 0xC00002D8
+ STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER NTStatus = 0xC00002D9
+ STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER NTStatus = 0xC00002DA
+ STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER NTStatus = 0xC00002DB
+ STATUS_DS_HAVE_PRIMARY_MEMBERS NTStatus = 0xC00002DC
+ STATUS_WMI_NOT_SUPPORTED NTStatus = 0xC00002DD
+ STATUS_INSUFFICIENT_POWER NTStatus = 0xC00002DE
+ STATUS_SAM_NEED_BOOTKEY_PASSWORD NTStatus = 0xC00002DF
+ STATUS_SAM_NEED_BOOTKEY_FLOPPY NTStatus = 0xC00002E0
+ STATUS_DS_CANT_START NTStatus = 0xC00002E1
+ STATUS_DS_INIT_FAILURE NTStatus = 0xC00002E2
+ STATUS_SAM_INIT_FAILURE NTStatus = 0xC00002E3
+ STATUS_DS_GC_REQUIRED NTStatus = 0xC00002E4
+ STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY NTStatus = 0xC00002E5
+ STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS NTStatus = 0xC00002E6
+ STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED NTStatus = 0xC00002E7
+ STATUS_MULTIPLE_FAULT_VIOLATION NTStatus = 0xC00002E8
+ STATUS_CURRENT_DOMAIN_NOT_ALLOWED NTStatus = 0xC00002E9
+ STATUS_CANNOT_MAKE NTStatus = 0xC00002EA
+ STATUS_SYSTEM_SHUTDOWN NTStatus = 0xC00002EB
+ STATUS_DS_INIT_FAILURE_CONSOLE NTStatus = 0xC00002EC
+ STATUS_DS_SAM_INIT_FAILURE_CONSOLE NTStatus = 0xC00002ED
+ STATUS_UNFINISHED_CONTEXT_DELETED NTStatus = 0xC00002EE
+ STATUS_NO_TGT_REPLY NTStatus = 0xC00002EF
+ STATUS_OBJECTID_NOT_FOUND NTStatus = 0xC00002F0
+ STATUS_NO_IP_ADDRESSES NTStatus = 0xC00002F1
+ STATUS_WRONG_CREDENTIAL_HANDLE NTStatus = 0xC00002F2
+ STATUS_CRYPTO_SYSTEM_INVALID NTStatus = 0xC00002F3
+ STATUS_MAX_REFERRALS_EXCEEDED NTStatus = 0xC00002F4
+ STATUS_MUST_BE_KDC NTStatus = 0xC00002F5
+ STATUS_STRONG_CRYPTO_NOT_SUPPORTED NTStatus = 0xC00002F6
+ STATUS_TOO_MANY_PRINCIPALS NTStatus = 0xC00002F7
+ STATUS_NO_PA_DATA NTStatus = 0xC00002F8
+ STATUS_PKINIT_NAME_MISMATCH NTStatus = 0xC00002F9
+ STATUS_SMARTCARD_LOGON_REQUIRED NTStatus = 0xC00002FA
+ STATUS_KDC_INVALID_REQUEST NTStatus = 0xC00002FB
+ STATUS_KDC_UNABLE_TO_REFER NTStatus = 0xC00002FC
+ STATUS_KDC_UNKNOWN_ETYPE NTStatus = 0xC00002FD
+ STATUS_SHUTDOWN_IN_PROGRESS NTStatus = 0xC00002FE
+ STATUS_SERVER_SHUTDOWN_IN_PROGRESS NTStatus = 0xC00002FF
+ STATUS_NOT_SUPPORTED_ON_SBS NTStatus = 0xC0000300
+ STATUS_WMI_GUID_DISCONNECTED NTStatus = 0xC0000301
+ STATUS_WMI_ALREADY_DISABLED NTStatus = 0xC0000302
+ STATUS_WMI_ALREADY_ENABLED NTStatus = 0xC0000303
+ STATUS_MFT_TOO_FRAGMENTED NTStatus = 0xC0000304
+ STATUS_COPY_PROTECTION_FAILURE NTStatus = 0xC0000305
+ STATUS_CSS_AUTHENTICATION_FAILURE NTStatus = 0xC0000306
+ STATUS_CSS_KEY_NOT_PRESENT NTStatus = 0xC0000307
+ STATUS_CSS_KEY_NOT_ESTABLISHED NTStatus = 0xC0000308
+ STATUS_CSS_SCRAMBLED_SECTOR NTStatus = 0xC0000309
+ STATUS_CSS_REGION_MISMATCH NTStatus = 0xC000030A
+ STATUS_CSS_RESETS_EXHAUSTED NTStatus = 0xC000030B
+ STATUS_PASSWORD_CHANGE_REQUIRED NTStatus = 0xC000030C
+ STATUS_LOST_MODE_LOGON_RESTRICTION NTStatus = 0xC000030D
+ STATUS_PKINIT_FAILURE NTStatus = 0xC0000320
+ STATUS_SMARTCARD_SUBSYSTEM_FAILURE NTStatus = 0xC0000321
+ STATUS_NO_KERB_KEY NTStatus = 0xC0000322
+ STATUS_HOST_DOWN NTStatus = 0xC0000350
+ STATUS_UNSUPPORTED_PREAUTH NTStatus = 0xC0000351
+ STATUS_EFS_ALG_BLOB_TOO_BIG NTStatus = 0xC0000352
+ STATUS_PORT_NOT_SET NTStatus = 0xC0000353
+ STATUS_DEBUGGER_INACTIVE NTStatus = 0xC0000354
+ STATUS_DS_VERSION_CHECK_FAILURE NTStatus = 0xC0000355
+ STATUS_AUDITING_DISABLED NTStatus = 0xC0000356
+ STATUS_PRENT4_MACHINE_ACCOUNT NTStatus = 0xC0000357
+ STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER NTStatus = 0xC0000358
+ STATUS_INVALID_IMAGE_WIN_32 NTStatus = 0xC0000359
+ STATUS_INVALID_IMAGE_WIN_64 NTStatus = 0xC000035A
+ STATUS_BAD_BINDINGS NTStatus = 0xC000035B
+ STATUS_NETWORK_SESSION_EXPIRED NTStatus = 0xC000035C
+ STATUS_APPHELP_BLOCK NTStatus = 0xC000035D
+ STATUS_ALL_SIDS_FILTERED NTStatus = 0xC000035E
+ STATUS_NOT_SAFE_MODE_DRIVER NTStatus = 0xC000035F
+ STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT NTStatus = 0xC0000361
+ STATUS_ACCESS_DISABLED_BY_POLICY_PATH NTStatus = 0xC0000362
+ STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER NTStatus = 0xC0000363
+ STATUS_ACCESS_DISABLED_BY_POLICY_OTHER NTStatus = 0xC0000364
+ STATUS_FAILED_DRIVER_ENTRY NTStatus = 0xC0000365
+ STATUS_DEVICE_ENUMERATION_ERROR NTStatus = 0xC0000366
+ STATUS_MOUNT_POINT_NOT_RESOLVED NTStatus = 0xC0000368
+ STATUS_INVALID_DEVICE_OBJECT_PARAMETER NTStatus = 0xC0000369
+ STATUS_MCA_OCCURED NTStatus = 0xC000036A
+ STATUS_DRIVER_BLOCKED_CRITICAL NTStatus = 0xC000036B
+ STATUS_DRIVER_BLOCKED NTStatus = 0xC000036C
+ STATUS_DRIVER_DATABASE_ERROR NTStatus = 0xC000036D
+ STATUS_SYSTEM_HIVE_TOO_LARGE NTStatus = 0xC000036E
+ STATUS_INVALID_IMPORT_OF_NON_DLL NTStatus = 0xC000036F
+ STATUS_DS_SHUTTING_DOWN NTStatus = 0x40000370
+ STATUS_NO_SECRETS NTStatus = 0xC0000371
+ STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY NTStatus = 0xC0000372
+ STATUS_FAILED_STACK_SWITCH NTStatus = 0xC0000373
+ STATUS_HEAP_CORRUPTION NTStatus = 0xC0000374
+ STATUS_SMARTCARD_WRONG_PIN NTStatus = 0xC0000380
+ STATUS_SMARTCARD_CARD_BLOCKED NTStatus = 0xC0000381
+ STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED NTStatus = 0xC0000382
+ STATUS_SMARTCARD_NO_CARD NTStatus = 0xC0000383
+ STATUS_SMARTCARD_NO_KEY_CONTAINER NTStatus = 0xC0000384
+ STATUS_SMARTCARD_NO_CERTIFICATE NTStatus = 0xC0000385
+ STATUS_SMARTCARD_NO_KEYSET NTStatus = 0xC0000386
+ STATUS_SMARTCARD_IO_ERROR NTStatus = 0xC0000387
+ STATUS_DOWNGRADE_DETECTED NTStatus = 0xC0000388
+ STATUS_SMARTCARD_CERT_REVOKED NTStatus = 0xC0000389
+ STATUS_ISSUING_CA_UNTRUSTED NTStatus = 0xC000038A
+ STATUS_REVOCATION_OFFLINE_C NTStatus = 0xC000038B
+ STATUS_PKINIT_CLIENT_FAILURE NTStatus = 0xC000038C
+ STATUS_SMARTCARD_CERT_EXPIRED NTStatus = 0xC000038D
+ STATUS_DRIVER_FAILED_PRIOR_UNLOAD NTStatus = 0xC000038E
+ STATUS_SMARTCARD_SILENT_CONTEXT NTStatus = 0xC000038F
+ STATUS_PER_USER_TRUST_QUOTA_EXCEEDED NTStatus = 0xC0000401
+ STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED NTStatus = 0xC0000402
+ STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED NTStatus = 0xC0000403
+ STATUS_DS_NAME_NOT_UNIQUE NTStatus = 0xC0000404
+ STATUS_DS_DUPLICATE_ID_FOUND NTStatus = 0xC0000405
+ STATUS_DS_GROUP_CONVERSION_ERROR NTStatus = 0xC0000406
+ STATUS_VOLSNAP_PREPARE_HIBERNATE NTStatus = 0xC0000407
+ STATUS_USER2USER_REQUIRED NTStatus = 0xC0000408
+ STATUS_STACK_BUFFER_OVERRUN NTStatus = 0xC0000409
+ STATUS_NO_S4U_PROT_SUPPORT NTStatus = 0xC000040A
+ STATUS_CROSSREALM_DELEGATION_FAILURE NTStatus = 0xC000040B
+ STATUS_REVOCATION_OFFLINE_KDC NTStatus = 0xC000040C
+ STATUS_ISSUING_CA_UNTRUSTED_KDC NTStatus = 0xC000040D
+ STATUS_KDC_CERT_EXPIRED NTStatus = 0xC000040E
+ STATUS_KDC_CERT_REVOKED NTStatus = 0xC000040F
+ STATUS_PARAMETER_QUOTA_EXCEEDED NTStatus = 0xC0000410
+ STATUS_HIBERNATION_FAILURE NTStatus = 0xC0000411
+ STATUS_DELAY_LOAD_FAILED NTStatus = 0xC0000412
+ STATUS_AUTHENTICATION_FIREWALL_FAILED NTStatus = 0xC0000413
+ STATUS_VDM_DISALLOWED NTStatus = 0xC0000414
+ STATUS_HUNG_DISPLAY_DRIVER_THREAD NTStatus = 0xC0000415
+ STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE NTStatus = 0xC0000416
+ STATUS_INVALID_CRUNTIME_PARAMETER NTStatus = 0xC0000417
+ STATUS_NTLM_BLOCKED NTStatus = 0xC0000418
+ STATUS_DS_SRC_SID_EXISTS_IN_FOREST NTStatus = 0xC0000419
+ STATUS_DS_DOMAIN_NAME_EXISTS_IN_FOREST NTStatus = 0xC000041A
+ STATUS_DS_FLAT_NAME_EXISTS_IN_FOREST NTStatus = 0xC000041B
+ STATUS_INVALID_USER_PRINCIPAL_NAME NTStatus = 0xC000041C
+ STATUS_FATAL_USER_CALLBACK_EXCEPTION NTStatus = 0xC000041D
+ STATUS_ASSERTION_FAILURE NTStatus = 0xC0000420
+ STATUS_VERIFIER_STOP NTStatus = 0xC0000421
+ STATUS_CALLBACK_POP_STACK NTStatus = 0xC0000423
+ STATUS_INCOMPATIBLE_DRIVER_BLOCKED NTStatus = 0xC0000424
+ STATUS_HIVE_UNLOADED NTStatus = 0xC0000425
+ STATUS_COMPRESSION_DISABLED NTStatus = 0xC0000426
+ STATUS_FILE_SYSTEM_LIMITATION NTStatus = 0xC0000427
+ STATUS_INVALID_IMAGE_HASH NTStatus = 0xC0000428
+ STATUS_NOT_CAPABLE NTStatus = 0xC0000429
+ STATUS_REQUEST_OUT_OF_SEQUENCE NTStatus = 0xC000042A
+ STATUS_IMPLEMENTATION_LIMIT NTStatus = 0xC000042B
+ STATUS_ELEVATION_REQUIRED NTStatus = 0xC000042C
+ STATUS_NO_SECURITY_CONTEXT NTStatus = 0xC000042D
+ STATUS_PKU2U_CERT_FAILURE NTStatus = 0xC000042F
+ STATUS_BEYOND_VDL NTStatus = 0xC0000432
+ STATUS_ENCOUNTERED_WRITE_IN_PROGRESS NTStatus = 0xC0000433
+ STATUS_PTE_CHANGED NTStatus = 0xC0000434
+ STATUS_PURGE_FAILED NTStatus = 0xC0000435
+ STATUS_CRED_REQUIRES_CONFIRMATION NTStatus = 0xC0000440
+ STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE NTStatus = 0xC0000441
+ STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER NTStatus = 0xC0000442
+ STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE NTStatus = 0xC0000443
+ STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE NTStatus = 0xC0000444
+ STATUS_CS_ENCRYPTION_FILE_NOT_CSE NTStatus = 0xC0000445
+ STATUS_INVALID_LABEL NTStatus = 0xC0000446
+ STATUS_DRIVER_PROCESS_TERMINATED NTStatus = 0xC0000450
+ STATUS_AMBIGUOUS_SYSTEM_DEVICE NTStatus = 0xC0000451
+ STATUS_SYSTEM_DEVICE_NOT_FOUND NTStatus = 0xC0000452
+ STATUS_RESTART_BOOT_APPLICATION NTStatus = 0xC0000453
+ STATUS_INSUFFICIENT_NVRAM_RESOURCES NTStatus = 0xC0000454
+ STATUS_INVALID_SESSION NTStatus = 0xC0000455
+ STATUS_THREAD_ALREADY_IN_SESSION NTStatus = 0xC0000456
+ STATUS_THREAD_NOT_IN_SESSION NTStatus = 0xC0000457
+ STATUS_INVALID_WEIGHT NTStatus = 0xC0000458
+ STATUS_REQUEST_PAUSED NTStatus = 0xC0000459
+ STATUS_NO_RANGES_PROCESSED NTStatus = 0xC0000460
+ STATUS_DISK_RESOURCES_EXHAUSTED NTStatus = 0xC0000461
+ STATUS_NEEDS_REMEDIATION NTStatus = 0xC0000462
+ STATUS_DEVICE_FEATURE_NOT_SUPPORTED NTStatus = 0xC0000463
+ STATUS_DEVICE_UNREACHABLE NTStatus = 0xC0000464
+ STATUS_INVALID_TOKEN NTStatus = 0xC0000465
+ STATUS_SERVER_UNAVAILABLE NTStatus = 0xC0000466
+ STATUS_FILE_NOT_AVAILABLE NTStatus = 0xC0000467
+ STATUS_DEVICE_INSUFFICIENT_RESOURCES NTStatus = 0xC0000468
+ STATUS_PACKAGE_UPDATING NTStatus = 0xC0000469
+ STATUS_NOT_READ_FROM_COPY NTStatus = 0xC000046A
+ STATUS_FT_WRITE_FAILURE NTStatus = 0xC000046B
+ STATUS_FT_DI_SCAN_REQUIRED NTStatus = 0xC000046C
+ STATUS_OBJECT_NOT_EXTERNALLY_BACKED NTStatus = 0xC000046D
+ STATUS_EXTERNAL_BACKING_PROVIDER_UNKNOWN NTStatus = 0xC000046E
+ STATUS_COMPRESSION_NOT_BENEFICIAL NTStatus = 0xC000046F
+ STATUS_DATA_CHECKSUM_ERROR NTStatus = 0xC0000470
+ STATUS_INTERMIXED_KERNEL_EA_OPERATION NTStatus = 0xC0000471
+ STATUS_TRIM_READ_ZERO_NOT_SUPPORTED NTStatus = 0xC0000472
+ STATUS_TOO_MANY_SEGMENT_DESCRIPTORS NTStatus = 0xC0000473
+ STATUS_INVALID_OFFSET_ALIGNMENT NTStatus = 0xC0000474
+ STATUS_INVALID_FIELD_IN_PARAMETER_LIST NTStatus = 0xC0000475
+ STATUS_OPERATION_IN_PROGRESS NTStatus = 0xC0000476
+ STATUS_INVALID_INITIATOR_TARGET_PATH NTStatus = 0xC0000477
+ STATUS_SCRUB_DATA_DISABLED NTStatus = 0xC0000478
+ STATUS_NOT_REDUNDANT_STORAGE NTStatus = 0xC0000479
+ STATUS_RESIDENT_FILE_NOT_SUPPORTED NTStatus = 0xC000047A
+ STATUS_COMPRESSED_FILE_NOT_SUPPORTED NTStatus = 0xC000047B
+ STATUS_DIRECTORY_NOT_SUPPORTED NTStatus = 0xC000047C
+ STATUS_IO_OPERATION_TIMEOUT NTStatus = 0xC000047D
+ STATUS_SYSTEM_NEEDS_REMEDIATION NTStatus = 0xC000047E
+ STATUS_APPX_INTEGRITY_FAILURE_CLR_NGEN NTStatus = 0xC000047F
+ STATUS_SHARE_UNAVAILABLE NTStatus = 0xC0000480
+ STATUS_APISET_NOT_HOSTED NTStatus = 0xC0000481
+ STATUS_APISET_NOT_PRESENT NTStatus = 0xC0000482
+ STATUS_DEVICE_HARDWARE_ERROR NTStatus = 0xC0000483
+ STATUS_FIRMWARE_SLOT_INVALID NTStatus = 0xC0000484
+ STATUS_FIRMWARE_IMAGE_INVALID NTStatus = 0xC0000485
+ STATUS_STORAGE_TOPOLOGY_ID_MISMATCH NTStatus = 0xC0000486
+ STATUS_WIM_NOT_BOOTABLE NTStatus = 0xC0000487
+ STATUS_BLOCKED_BY_PARENTAL_CONTROLS NTStatus = 0xC0000488
+ STATUS_NEEDS_REGISTRATION NTStatus = 0xC0000489
+ STATUS_QUOTA_ACTIVITY NTStatus = 0xC000048A
+ STATUS_CALLBACK_INVOKE_INLINE NTStatus = 0xC000048B
+ STATUS_BLOCK_TOO_MANY_REFERENCES NTStatus = 0xC000048C
+ STATUS_MARKED_TO_DISALLOW_WRITES NTStatus = 0xC000048D
+ STATUS_NETWORK_ACCESS_DENIED_EDP NTStatus = 0xC000048E
+ STATUS_ENCLAVE_FAILURE NTStatus = 0xC000048F
+ STATUS_PNP_NO_COMPAT_DRIVERS NTStatus = 0xC0000490
+ STATUS_PNP_DRIVER_PACKAGE_NOT_FOUND NTStatus = 0xC0000491
+ STATUS_PNP_DRIVER_CONFIGURATION_NOT_FOUND NTStatus = 0xC0000492
+ STATUS_PNP_DRIVER_CONFIGURATION_INCOMPLETE NTStatus = 0xC0000493
+ STATUS_PNP_FUNCTION_DRIVER_REQUIRED NTStatus = 0xC0000494
+ STATUS_PNP_DEVICE_CONFIGURATION_PENDING NTStatus = 0xC0000495
+ STATUS_DEVICE_HINT_NAME_BUFFER_TOO_SMALL NTStatus = 0xC0000496
+ STATUS_PACKAGE_NOT_AVAILABLE NTStatus = 0xC0000497
+ STATUS_DEVICE_IN_MAINTENANCE NTStatus = 0xC0000499
+ STATUS_NOT_SUPPORTED_ON_DAX NTStatus = 0xC000049A
+ STATUS_FREE_SPACE_TOO_FRAGMENTED NTStatus = 0xC000049B
+ STATUS_DAX_MAPPING_EXISTS NTStatus = 0xC000049C
+ STATUS_CHILD_PROCESS_BLOCKED NTStatus = 0xC000049D
+ STATUS_STORAGE_LOST_DATA_PERSISTENCE NTStatus = 0xC000049E
+ STATUS_VRF_CFG_ENABLED NTStatus = 0xC000049F
+ STATUS_PARTITION_TERMINATING NTStatus = 0xC00004A0
+ STATUS_EXTERNAL_SYSKEY_NOT_SUPPORTED NTStatus = 0xC00004A1
+ STATUS_ENCLAVE_VIOLATION NTStatus = 0xC00004A2
+ STATUS_FILE_PROTECTED_UNDER_DPL NTStatus = 0xC00004A3
+ STATUS_VOLUME_NOT_CLUSTER_ALIGNED NTStatus = 0xC00004A4
+ STATUS_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND NTStatus = 0xC00004A5
+ STATUS_APPX_FILE_NOT_ENCRYPTED NTStatus = 0xC00004A6
+ STATUS_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED NTStatus = 0xC00004A7
+ STATUS_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET NTStatus = 0xC00004A8
+ STATUS_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE NTStatus = 0xC00004A9
+ STATUS_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER NTStatus = 0xC00004AA
+ STATUS_FT_READ_FAILURE NTStatus = 0xC00004AB
+ STATUS_PATCH_CONFLICT NTStatus = 0xC00004AC
+ STATUS_STORAGE_RESERVE_ID_INVALID NTStatus = 0xC00004AD
+ STATUS_STORAGE_RESERVE_DOES_NOT_EXIST NTStatus = 0xC00004AE
+ STATUS_STORAGE_RESERVE_ALREADY_EXISTS NTStatus = 0xC00004AF
+ STATUS_STORAGE_RESERVE_NOT_EMPTY NTStatus = 0xC00004B0
+ STATUS_NOT_A_DAX_VOLUME NTStatus = 0xC00004B1
+ STATUS_NOT_DAX_MAPPABLE NTStatus = 0xC00004B2
+ STATUS_CASE_DIFFERING_NAMES_IN_DIR NTStatus = 0xC00004B3
+ STATUS_FILE_NOT_SUPPORTED NTStatus = 0xC00004B4
+ STATUS_NOT_SUPPORTED_WITH_BTT NTStatus = 0xC00004B5
+ STATUS_ENCRYPTION_DISABLED NTStatus = 0xC00004B6
+ STATUS_ENCRYPTING_METADATA_DISALLOWED NTStatus = 0xC00004B7
+ STATUS_CANT_CLEAR_ENCRYPTION_FLAG NTStatus = 0xC00004B8
+ STATUS_INVALID_TASK_NAME NTStatus = 0xC0000500
+ STATUS_INVALID_TASK_INDEX NTStatus = 0xC0000501
+ STATUS_THREAD_ALREADY_IN_TASK NTStatus = 0xC0000502
+ STATUS_CALLBACK_BYPASS NTStatus = 0xC0000503
+ STATUS_UNDEFINED_SCOPE NTStatus = 0xC0000504
+ STATUS_INVALID_CAP NTStatus = 0xC0000505
+ STATUS_NOT_GUI_PROCESS NTStatus = 0xC0000506
+ STATUS_DEVICE_HUNG NTStatus = 0xC0000507
+ STATUS_CONTAINER_ASSIGNED NTStatus = 0xC0000508
+ STATUS_JOB_NO_CONTAINER NTStatus = 0xC0000509
+ STATUS_DEVICE_UNRESPONSIVE NTStatus = 0xC000050A
+ STATUS_REPARSE_POINT_ENCOUNTERED NTStatus = 0xC000050B
+ STATUS_ATTRIBUTE_NOT_PRESENT NTStatus = 0xC000050C
+ STATUS_NOT_A_TIERED_VOLUME NTStatus = 0xC000050D
+ STATUS_ALREADY_HAS_STREAM_ID NTStatus = 0xC000050E
+ STATUS_JOB_NOT_EMPTY NTStatus = 0xC000050F
+ STATUS_ALREADY_INITIALIZED NTStatus = 0xC0000510
+ STATUS_ENCLAVE_NOT_TERMINATED NTStatus = 0xC0000511
+ STATUS_ENCLAVE_IS_TERMINATING NTStatus = 0xC0000512
+ STATUS_SMB1_NOT_AVAILABLE NTStatus = 0xC0000513
+ STATUS_SMR_GARBAGE_COLLECTION_REQUIRED NTStatus = 0xC0000514
+ STATUS_INTERRUPTED NTStatus = 0xC0000515
+ STATUS_THREAD_NOT_RUNNING NTStatus = 0xC0000516
+ STATUS_FAIL_FAST_EXCEPTION NTStatus = 0xC0000602
+ STATUS_IMAGE_CERT_REVOKED NTStatus = 0xC0000603
+ STATUS_DYNAMIC_CODE_BLOCKED NTStatus = 0xC0000604
+ STATUS_IMAGE_CERT_EXPIRED NTStatus = 0xC0000605
+ STATUS_STRICT_CFG_VIOLATION NTStatus = 0xC0000606
+ STATUS_SET_CONTEXT_DENIED NTStatus = 0xC000060A
+ STATUS_CROSS_PARTITION_VIOLATION NTStatus = 0xC000060B
+ STATUS_PORT_CLOSED NTStatus = 0xC0000700
+ STATUS_MESSAGE_LOST NTStatus = 0xC0000701
+ STATUS_INVALID_MESSAGE NTStatus = 0xC0000702
+ STATUS_REQUEST_CANCELED NTStatus = 0xC0000703
+ STATUS_RECURSIVE_DISPATCH NTStatus = 0xC0000704
+ STATUS_LPC_RECEIVE_BUFFER_EXPECTED NTStatus = 0xC0000705
+ STATUS_LPC_INVALID_CONNECTION_USAGE NTStatus = 0xC0000706
+ STATUS_LPC_REQUESTS_NOT_ALLOWED NTStatus = 0xC0000707
+ STATUS_RESOURCE_IN_USE NTStatus = 0xC0000708
+ STATUS_HARDWARE_MEMORY_ERROR NTStatus = 0xC0000709
+ STATUS_THREADPOOL_HANDLE_EXCEPTION NTStatus = 0xC000070A
+ STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED NTStatus = 0xC000070B
+ STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED NTStatus = 0xC000070C
+ STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED NTStatus = 0xC000070D
+ STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED NTStatus = 0xC000070E
+ STATUS_THREADPOOL_RELEASED_DURING_OPERATION NTStatus = 0xC000070F
+ STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING NTStatus = 0xC0000710
+ STATUS_APC_RETURNED_WHILE_IMPERSONATING NTStatus = 0xC0000711
+ STATUS_PROCESS_IS_PROTECTED NTStatus = 0xC0000712
+ STATUS_MCA_EXCEPTION NTStatus = 0xC0000713
+ STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE NTStatus = 0xC0000714
+ STATUS_SYMLINK_CLASS_DISABLED NTStatus = 0xC0000715
+ STATUS_INVALID_IDN_NORMALIZATION NTStatus = 0xC0000716
+ STATUS_NO_UNICODE_TRANSLATION NTStatus = 0xC0000717
+ STATUS_ALREADY_REGISTERED NTStatus = 0xC0000718
+ STATUS_CONTEXT_MISMATCH NTStatus = 0xC0000719
+ STATUS_PORT_ALREADY_HAS_COMPLETION_LIST NTStatus = 0xC000071A
+ STATUS_CALLBACK_RETURNED_THREAD_PRIORITY NTStatus = 0xC000071B
+ STATUS_INVALID_THREAD NTStatus = 0xC000071C
+ STATUS_CALLBACK_RETURNED_TRANSACTION NTStatus = 0xC000071D
+ STATUS_CALLBACK_RETURNED_LDR_LOCK NTStatus = 0xC000071E
+ STATUS_CALLBACK_RETURNED_LANG NTStatus = 0xC000071F
+ STATUS_CALLBACK_RETURNED_PRI_BACK NTStatus = 0xC0000720
+ STATUS_CALLBACK_RETURNED_THREAD_AFFINITY NTStatus = 0xC0000721
+ STATUS_LPC_HANDLE_COUNT_EXCEEDED NTStatus = 0xC0000722
+ STATUS_EXECUTABLE_MEMORY_WRITE NTStatus = 0xC0000723
+ STATUS_KERNEL_EXECUTABLE_MEMORY_WRITE NTStatus = 0xC0000724
+ STATUS_ATTACHED_EXECUTABLE_MEMORY_WRITE NTStatus = 0xC0000725
+ STATUS_TRIGGERED_EXECUTABLE_MEMORY_WRITE NTStatus = 0xC0000726
+ STATUS_DISK_REPAIR_DISABLED NTStatus = 0xC0000800
+ STATUS_DS_DOMAIN_RENAME_IN_PROGRESS NTStatus = 0xC0000801
+ STATUS_DISK_QUOTA_EXCEEDED NTStatus = 0xC0000802
+ STATUS_DATA_LOST_REPAIR NTStatus = 0x80000803
+ STATUS_CONTENT_BLOCKED NTStatus = 0xC0000804
+ STATUS_BAD_CLUSTERS NTStatus = 0xC0000805
+ STATUS_VOLUME_DIRTY NTStatus = 0xC0000806
+ STATUS_DISK_REPAIR_REDIRECTED NTStatus = 0x40000807
+ STATUS_DISK_REPAIR_UNSUCCESSFUL NTStatus = 0xC0000808
+ STATUS_CORRUPT_LOG_OVERFULL NTStatus = 0xC0000809
+ STATUS_CORRUPT_LOG_CORRUPTED NTStatus = 0xC000080A
+ STATUS_CORRUPT_LOG_UNAVAILABLE NTStatus = 0xC000080B
+ STATUS_CORRUPT_LOG_DELETED_FULL NTStatus = 0xC000080C
+ STATUS_CORRUPT_LOG_CLEARED NTStatus = 0xC000080D
+ STATUS_ORPHAN_NAME_EXHAUSTED NTStatus = 0xC000080E
+ STATUS_PROACTIVE_SCAN_IN_PROGRESS NTStatus = 0xC000080F
+ STATUS_ENCRYPTED_IO_NOT_POSSIBLE NTStatus = 0xC0000810
+ STATUS_CORRUPT_LOG_UPLEVEL_RECORDS NTStatus = 0xC0000811
+ STATUS_FILE_CHECKED_OUT NTStatus = 0xC0000901
+ STATUS_CHECKOUT_REQUIRED NTStatus = 0xC0000902
+ STATUS_BAD_FILE_TYPE NTStatus = 0xC0000903
+ STATUS_FILE_TOO_LARGE NTStatus = 0xC0000904
+ STATUS_FORMS_AUTH_REQUIRED NTStatus = 0xC0000905
+ STATUS_VIRUS_INFECTED NTStatus = 0xC0000906
+ STATUS_VIRUS_DELETED NTStatus = 0xC0000907
+ STATUS_BAD_MCFG_TABLE NTStatus = 0xC0000908
+ STATUS_CANNOT_BREAK_OPLOCK NTStatus = 0xC0000909
+ STATUS_BAD_KEY NTStatus = 0xC000090A
+ STATUS_BAD_DATA NTStatus = 0xC000090B
+ STATUS_NO_KEY NTStatus = 0xC000090C
+ STATUS_FILE_HANDLE_REVOKED NTStatus = 0xC0000910
+ STATUS_WOW_ASSERTION NTStatus = 0xC0009898
+ STATUS_INVALID_SIGNATURE NTStatus = 0xC000A000
+ STATUS_HMAC_NOT_SUPPORTED NTStatus = 0xC000A001
+ STATUS_AUTH_TAG_MISMATCH NTStatus = 0xC000A002
+ STATUS_INVALID_STATE_TRANSITION NTStatus = 0xC000A003
+ STATUS_INVALID_KERNEL_INFO_VERSION NTStatus = 0xC000A004
+ STATUS_INVALID_PEP_INFO_VERSION NTStatus = 0xC000A005
+ STATUS_HANDLE_REVOKED NTStatus = 0xC000A006
+ STATUS_EOF_ON_GHOSTED_RANGE NTStatus = 0xC000A007
+ STATUS_IPSEC_QUEUE_OVERFLOW NTStatus = 0xC000A010
+ STATUS_ND_QUEUE_OVERFLOW NTStatus = 0xC000A011
+ STATUS_HOPLIMIT_EXCEEDED NTStatus = 0xC000A012
+ STATUS_PROTOCOL_NOT_SUPPORTED NTStatus = 0xC000A013
+ STATUS_FASTPATH_REJECTED NTStatus = 0xC000A014
+ STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED NTStatus = 0xC000A080
+ STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR NTStatus = 0xC000A081
+ STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR NTStatus = 0xC000A082
+ STATUS_XML_PARSE_ERROR NTStatus = 0xC000A083
+ STATUS_XMLDSIG_ERROR NTStatus = 0xC000A084
+ STATUS_WRONG_COMPARTMENT NTStatus = 0xC000A085
+ STATUS_AUTHIP_FAILURE NTStatus = 0xC000A086
+ STATUS_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS NTStatus = 0xC000A087
+ STATUS_DS_OID_NOT_FOUND NTStatus = 0xC000A088
+ STATUS_INCORRECT_ACCOUNT_TYPE NTStatus = 0xC000A089
+ STATUS_HASH_NOT_SUPPORTED NTStatus = 0xC000A100
+ STATUS_HASH_NOT_PRESENT NTStatus = 0xC000A101
+ STATUS_SECONDARY_IC_PROVIDER_NOT_REGISTERED NTStatus = 0xC000A121
+ STATUS_GPIO_CLIENT_INFORMATION_INVALID NTStatus = 0xC000A122
+ STATUS_GPIO_VERSION_NOT_SUPPORTED NTStatus = 0xC000A123
+ STATUS_GPIO_INVALID_REGISTRATION_PACKET NTStatus = 0xC000A124
+ STATUS_GPIO_OPERATION_DENIED NTStatus = 0xC000A125
+ STATUS_GPIO_INCOMPATIBLE_CONNECT_MODE NTStatus = 0xC000A126
+ STATUS_GPIO_INTERRUPT_ALREADY_UNMASKED NTStatus = 0x8000A127
+ STATUS_CANNOT_SWITCH_RUNLEVEL NTStatus = 0xC000A141
+ STATUS_INVALID_RUNLEVEL_SETTING NTStatus = 0xC000A142
+ STATUS_RUNLEVEL_SWITCH_TIMEOUT NTStatus = 0xC000A143
+ STATUS_SERVICES_FAILED_AUTOSTART NTStatus = 0x4000A144
+ STATUS_RUNLEVEL_SWITCH_AGENT_TIMEOUT NTStatus = 0xC000A145
+ STATUS_RUNLEVEL_SWITCH_IN_PROGRESS NTStatus = 0xC000A146
+ STATUS_NOT_APPCONTAINER NTStatus = 0xC000A200
+ STATUS_NOT_SUPPORTED_IN_APPCONTAINER NTStatus = 0xC000A201
+ STATUS_INVALID_PACKAGE_SID_LENGTH NTStatus = 0xC000A202
+ STATUS_LPAC_ACCESS_DENIED NTStatus = 0xC000A203
+ STATUS_ADMINLESS_ACCESS_DENIED NTStatus = 0xC000A204
+ STATUS_APP_DATA_NOT_FOUND NTStatus = 0xC000A281
+ STATUS_APP_DATA_EXPIRED NTStatus = 0xC000A282
+ STATUS_APP_DATA_CORRUPT NTStatus = 0xC000A283
+ STATUS_APP_DATA_LIMIT_EXCEEDED NTStatus = 0xC000A284
+ STATUS_APP_DATA_REBOOT_REQUIRED NTStatus = 0xC000A285
+ STATUS_OFFLOAD_READ_FLT_NOT_SUPPORTED NTStatus = 0xC000A2A1
+ STATUS_OFFLOAD_WRITE_FLT_NOT_SUPPORTED NTStatus = 0xC000A2A2
+ STATUS_OFFLOAD_READ_FILE_NOT_SUPPORTED NTStatus = 0xC000A2A3
+ STATUS_OFFLOAD_WRITE_FILE_NOT_SUPPORTED NTStatus = 0xC000A2A4
+ STATUS_WOF_WIM_HEADER_CORRUPT NTStatus = 0xC000A2A5
+ STATUS_WOF_WIM_RESOURCE_TABLE_CORRUPT NTStatus = 0xC000A2A6
+ STATUS_WOF_FILE_RESOURCE_TABLE_CORRUPT NTStatus = 0xC000A2A7
+ STATUS_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE NTStatus = 0xC000CE01
+ STATUS_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT NTStatus = 0xC000CE02
+ STATUS_FILE_SYSTEM_VIRTUALIZATION_BUSY NTStatus = 0xC000CE03
+ STATUS_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN NTStatus = 0xC000CE04
+ STATUS_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION NTStatus = 0xC000CE05
+ STATUS_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT NTStatus = 0xC000CF00
+ STATUS_CLOUD_FILE_PROVIDER_NOT_RUNNING NTStatus = 0xC000CF01
+ STATUS_CLOUD_FILE_METADATA_CORRUPT NTStatus = 0xC000CF02
+ STATUS_CLOUD_FILE_METADATA_TOO_LARGE NTStatus = 0xC000CF03
+ STATUS_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE NTStatus = 0x8000CF04
+ STATUS_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS NTStatus = 0x8000CF05
+ STATUS_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED NTStatus = 0xC000CF06
+ STATUS_NOT_A_CLOUD_FILE NTStatus = 0xC000CF07
+ STATUS_CLOUD_FILE_NOT_IN_SYNC NTStatus = 0xC000CF08
+ STATUS_CLOUD_FILE_ALREADY_CONNECTED NTStatus = 0xC000CF09
+ STATUS_CLOUD_FILE_NOT_SUPPORTED NTStatus = 0xC000CF0A
+ STATUS_CLOUD_FILE_INVALID_REQUEST NTStatus = 0xC000CF0B
+ STATUS_CLOUD_FILE_READ_ONLY_VOLUME NTStatus = 0xC000CF0C
+ STATUS_CLOUD_FILE_CONNECTED_PROVIDER_ONLY NTStatus = 0xC000CF0D
+ STATUS_CLOUD_FILE_VALIDATION_FAILED NTStatus = 0xC000CF0E
+ STATUS_CLOUD_FILE_AUTHENTICATION_FAILED NTStatus = 0xC000CF0F
+ STATUS_CLOUD_FILE_INSUFFICIENT_RESOURCES NTStatus = 0xC000CF10
+ STATUS_CLOUD_FILE_NETWORK_UNAVAILABLE NTStatus = 0xC000CF11
+ STATUS_CLOUD_FILE_UNSUCCESSFUL NTStatus = 0xC000CF12
+ STATUS_CLOUD_FILE_NOT_UNDER_SYNC_ROOT NTStatus = 0xC000CF13
+ STATUS_CLOUD_FILE_IN_USE NTStatus = 0xC000CF14
+ STATUS_CLOUD_FILE_PINNED NTStatus = 0xC000CF15
+ STATUS_CLOUD_FILE_REQUEST_ABORTED NTStatus = 0xC000CF16
+ STATUS_CLOUD_FILE_PROPERTY_CORRUPT NTStatus = 0xC000CF17
+ STATUS_CLOUD_FILE_ACCESS_DENIED NTStatus = 0xC000CF18
+ STATUS_CLOUD_FILE_INCOMPATIBLE_HARDLINKS NTStatus = 0xC000CF19
+ STATUS_CLOUD_FILE_PROPERTY_LOCK_CONFLICT NTStatus = 0xC000CF1A
+ STATUS_CLOUD_FILE_REQUEST_CANCELED NTStatus = 0xC000CF1B
+ STATUS_CLOUD_FILE_PROVIDER_TERMINATED NTStatus = 0xC000CF1D
+ STATUS_NOT_A_CLOUD_SYNC_ROOT NTStatus = 0xC000CF1E
+ STATUS_CLOUD_FILE_REQUEST_TIMEOUT NTStatus = 0xC000CF1F
+ STATUS_ACPI_INVALID_OPCODE NTStatus = 0xC0140001
+ STATUS_ACPI_STACK_OVERFLOW NTStatus = 0xC0140002
+ STATUS_ACPI_ASSERT_FAILED NTStatus = 0xC0140003
+ STATUS_ACPI_INVALID_INDEX NTStatus = 0xC0140004
+ STATUS_ACPI_INVALID_ARGUMENT NTStatus = 0xC0140005
+ STATUS_ACPI_FATAL NTStatus = 0xC0140006
+ STATUS_ACPI_INVALID_SUPERNAME NTStatus = 0xC0140007
+ STATUS_ACPI_INVALID_ARGTYPE NTStatus = 0xC0140008
+ STATUS_ACPI_INVALID_OBJTYPE NTStatus = 0xC0140009
+ STATUS_ACPI_INVALID_TARGETTYPE NTStatus = 0xC014000A
+ STATUS_ACPI_INCORRECT_ARGUMENT_COUNT NTStatus = 0xC014000B
+ STATUS_ACPI_ADDRESS_NOT_MAPPED NTStatus = 0xC014000C
+ STATUS_ACPI_INVALID_EVENTTYPE NTStatus = 0xC014000D
+ STATUS_ACPI_HANDLER_COLLISION NTStatus = 0xC014000E
+ STATUS_ACPI_INVALID_DATA NTStatus = 0xC014000F
+ STATUS_ACPI_INVALID_REGION NTStatus = 0xC0140010
+ STATUS_ACPI_INVALID_ACCESS_SIZE NTStatus = 0xC0140011
+ STATUS_ACPI_ACQUIRE_GLOBAL_LOCK NTStatus = 0xC0140012
+ STATUS_ACPI_ALREADY_INITIALIZED NTStatus = 0xC0140013
+ STATUS_ACPI_NOT_INITIALIZED NTStatus = 0xC0140014
+ STATUS_ACPI_INVALID_MUTEX_LEVEL NTStatus = 0xC0140015
+ STATUS_ACPI_MUTEX_NOT_OWNED NTStatus = 0xC0140016
+ STATUS_ACPI_MUTEX_NOT_OWNER NTStatus = 0xC0140017
+ STATUS_ACPI_RS_ACCESS NTStatus = 0xC0140018
+ STATUS_ACPI_INVALID_TABLE NTStatus = 0xC0140019
+ STATUS_ACPI_REG_HANDLER_FAILED NTStatus = 0xC0140020
+ STATUS_ACPI_POWER_REQUEST_FAILED NTStatus = 0xC0140021
+ STATUS_CTX_WINSTATION_NAME_INVALID NTStatus = 0xC00A0001
+ STATUS_CTX_INVALID_PD NTStatus = 0xC00A0002
+ STATUS_CTX_PD_NOT_FOUND NTStatus = 0xC00A0003
+ STATUS_CTX_CDM_CONNECT NTStatus = 0x400A0004
+ STATUS_CTX_CDM_DISCONNECT NTStatus = 0x400A0005
+ STATUS_CTX_CLOSE_PENDING NTStatus = 0xC00A0006
+ STATUS_CTX_NO_OUTBUF NTStatus = 0xC00A0007
+ STATUS_CTX_MODEM_INF_NOT_FOUND NTStatus = 0xC00A0008
+ STATUS_CTX_INVALID_MODEMNAME NTStatus = 0xC00A0009
+ STATUS_CTX_RESPONSE_ERROR NTStatus = 0xC00A000A
+ STATUS_CTX_MODEM_RESPONSE_TIMEOUT NTStatus = 0xC00A000B
+ STATUS_CTX_MODEM_RESPONSE_NO_CARRIER NTStatus = 0xC00A000C
+ STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE NTStatus = 0xC00A000D
+ STATUS_CTX_MODEM_RESPONSE_BUSY NTStatus = 0xC00A000E
+ STATUS_CTX_MODEM_RESPONSE_VOICE NTStatus = 0xC00A000F
+ STATUS_CTX_TD_ERROR NTStatus = 0xC00A0010
+ STATUS_CTX_LICENSE_CLIENT_INVALID NTStatus = 0xC00A0012
+ STATUS_CTX_LICENSE_NOT_AVAILABLE NTStatus = 0xC00A0013
+ STATUS_CTX_LICENSE_EXPIRED NTStatus = 0xC00A0014
+ STATUS_CTX_WINSTATION_NOT_FOUND NTStatus = 0xC00A0015
+ STATUS_CTX_WINSTATION_NAME_COLLISION NTStatus = 0xC00A0016
+ STATUS_CTX_WINSTATION_BUSY NTStatus = 0xC00A0017
+ STATUS_CTX_BAD_VIDEO_MODE NTStatus = 0xC00A0018
+ STATUS_CTX_GRAPHICS_INVALID NTStatus = 0xC00A0022
+ STATUS_CTX_NOT_CONSOLE NTStatus = 0xC00A0024
+ STATUS_CTX_CLIENT_QUERY_TIMEOUT NTStatus = 0xC00A0026
+ STATUS_CTX_CONSOLE_DISCONNECT NTStatus = 0xC00A0027
+ STATUS_CTX_CONSOLE_CONNECT NTStatus = 0xC00A0028
+ STATUS_CTX_SHADOW_DENIED NTStatus = 0xC00A002A
+ STATUS_CTX_WINSTATION_ACCESS_DENIED NTStatus = 0xC00A002B
+ STATUS_CTX_INVALID_WD NTStatus = 0xC00A002E
+ STATUS_CTX_WD_NOT_FOUND NTStatus = 0xC00A002F
+ STATUS_CTX_SHADOW_INVALID NTStatus = 0xC00A0030
+ STATUS_CTX_SHADOW_DISABLED NTStatus = 0xC00A0031
+ STATUS_RDP_PROTOCOL_ERROR NTStatus = 0xC00A0032
+ STATUS_CTX_CLIENT_LICENSE_NOT_SET NTStatus = 0xC00A0033
+ STATUS_CTX_CLIENT_LICENSE_IN_USE NTStatus = 0xC00A0034
+ STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE NTStatus = 0xC00A0035
+ STATUS_CTX_SHADOW_NOT_RUNNING NTStatus = 0xC00A0036
+ STATUS_CTX_LOGON_DISABLED NTStatus = 0xC00A0037
+ STATUS_CTX_SECURITY_LAYER_ERROR NTStatus = 0xC00A0038
+ STATUS_TS_INCOMPATIBLE_SESSIONS NTStatus = 0xC00A0039
+ STATUS_TS_VIDEO_SUBSYSTEM_ERROR NTStatus = 0xC00A003A
+ STATUS_PNP_BAD_MPS_TABLE NTStatus = 0xC0040035
+ STATUS_PNP_TRANSLATION_FAILED NTStatus = 0xC0040036
+ STATUS_PNP_IRQ_TRANSLATION_FAILED NTStatus = 0xC0040037
+ STATUS_PNP_INVALID_ID NTStatus = 0xC0040038
+ STATUS_IO_REISSUE_AS_CACHED NTStatus = 0xC0040039
+ STATUS_MUI_FILE_NOT_FOUND NTStatus = 0xC00B0001
+ STATUS_MUI_INVALID_FILE NTStatus = 0xC00B0002
+ STATUS_MUI_INVALID_RC_CONFIG NTStatus = 0xC00B0003
+ STATUS_MUI_INVALID_LOCALE_NAME NTStatus = 0xC00B0004
+ STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME NTStatus = 0xC00B0005
+ STATUS_MUI_FILE_NOT_LOADED NTStatus = 0xC00B0006
+ STATUS_RESOURCE_ENUM_USER_STOP NTStatus = 0xC00B0007
+ STATUS_FLT_NO_HANDLER_DEFINED NTStatus = 0xC01C0001
+ STATUS_FLT_CONTEXT_ALREADY_DEFINED NTStatus = 0xC01C0002
+ STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST NTStatus = 0xC01C0003
+ STATUS_FLT_DISALLOW_FAST_IO NTStatus = 0xC01C0004
+ STATUS_FLT_INVALID_NAME_REQUEST NTStatus = 0xC01C0005
+ STATUS_FLT_NOT_SAFE_TO_POST_OPERATION NTStatus = 0xC01C0006
+ STATUS_FLT_NOT_INITIALIZED NTStatus = 0xC01C0007
+ STATUS_FLT_FILTER_NOT_READY NTStatus = 0xC01C0008
+ STATUS_FLT_POST_OPERATION_CLEANUP NTStatus = 0xC01C0009
+ STATUS_FLT_INTERNAL_ERROR NTStatus = 0xC01C000A
+ STATUS_FLT_DELETING_OBJECT NTStatus = 0xC01C000B
+ STATUS_FLT_MUST_BE_NONPAGED_POOL NTStatus = 0xC01C000C
+ STATUS_FLT_DUPLICATE_ENTRY NTStatus = 0xC01C000D
+ STATUS_FLT_CBDQ_DISABLED NTStatus = 0xC01C000E
+ STATUS_FLT_DO_NOT_ATTACH NTStatus = 0xC01C000F
+ STATUS_FLT_DO_NOT_DETACH NTStatus = 0xC01C0010
+ STATUS_FLT_INSTANCE_ALTITUDE_COLLISION NTStatus = 0xC01C0011
+ STATUS_FLT_INSTANCE_NAME_COLLISION NTStatus = 0xC01C0012
+ STATUS_FLT_FILTER_NOT_FOUND NTStatus = 0xC01C0013
+ STATUS_FLT_VOLUME_NOT_FOUND NTStatus = 0xC01C0014
+ STATUS_FLT_INSTANCE_NOT_FOUND NTStatus = 0xC01C0015
+ STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND NTStatus = 0xC01C0016
+ STATUS_FLT_INVALID_CONTEXT_REGISTRATION NTStatus = 0xC01C0017
+ STATUS_FLT_NAME_CACHE_MISS NTStatus = 0xC01C0018
+ STATUS_FLT_NO_DEVICE_OBJECT NTStatus = 0xC01C0019
+ STATUS_FLT_VOLUME_ALREADY_MOUNTED NTStatus = 0xC01C001A
+ STATUS_FLT_ALREADY_ENLISTED NTStatus = 0xC01C001B
+ STATUS_FLT_CONTEXT_ALREADY_LINKED NTStatus = 0xC01C001C
+ STATUS_FLT_NO_WAITER_FOR_REPLY NTStatus = 0xC01C0020
+ STATUS_FLT_REGISTRATION_BUSY NTStatus = 0xC01C0023
+ STATUS_SXS_SECTION_NOT_FOUND NTStatus = 0xC0150001
+ STATUS_SXS_CANT_GEN_ACTCTX NTStatus = 0xC0150002
+ STATUS_SXS_INVALID_ACTCTXDATA_FORMAT NTStatus = 0xC0150003
+ STATUS_SXS_ASSEMBLY_NOT_FOUND NTStatus = 0xC0150004
+ STATUS_SXS_MANIFEST_FORMAT_ERROR NTStatus = 0xC0150005
+ STATUS_SXS_MANIFEST_PARSE_ERROR NTStatus = 0xC0150006
+ STATUS_SXS_ACTIVATION_CONTEXT_DISABLED NTStatus = 0xC0150007
+ STATUS_SXS_KEY_NOT_FOUND NTStatus = 0xC0150008
+ STATUS_SXS_VERSION_CONFLICT NTStatus = 0xC0150009
+ STATUS_SXS_WRONG_SECTION_TYPE NTStatus = 0xC015000A
+ STATUS_SXS_THREAD_QUERIES_DISABLED NTStatus = 0xC015000B
+ STATUS_SXS_ASSEMBLY_MISSING NTStatus = 0xC015000C
+ STATUS_SXS_RELEASE_ACTIVATION_CONTEXT NTStatus = 0x4015000D
+ STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET NTStatus = 0xC015000E
+ STATUS_SXS_EARLY_DEACTIVATION NTStatus = 0xC015000F
+ STATUS_SXS_INVALID_DEACTIVATION NTStatus = 0xC0150010
+ STATUS_SXS_MULTIPLE_DEACTIVATION NTStatus = 0xC0150011
+ STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY NTStatus = 0xC0150012
+ STATUS_SXS_PROCESS_TERMINATION_REQUESTED NTStatus = 0xC0150013
+ STATUS_SXS_CORRUPT_ACTIVATION_STACK NTStatus = 0xC0150014
+ STATUS_SXS_CORRUPTION NTStatus = 0xC0150015
+ STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE NTStatus = 0xC0150016
+ STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME NTStatus = 0xC0150017
+ STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE NTStatus = 0xC0150018
+ STATUS_SXS_IDENTITY_PARSE_ERROR NTStatus = 0xC0150019
+ STATUS_SXS_COMPONENT_STORE_CORRUPT NTStatus = 0xC015001A
+ STATUS_SXS_FILE_HASH_MISMATCH NTStatus = 0xC015001B
+ STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT NTStatus = 0xC015001C
+ STATUS_SXS_IDENTITIES_DIFFERENT NTStatus = 0xC015001D
+ STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT NTStatus = 0xC015001E
+ STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY NTStatus = 0xC015001F
+ STATUS_ADVANCED_INSTALLER_FAILED NTStatus = 0xC0150020
+ STATUS_XML_ENCODING_MISMATCH NTStatus = 0xC0150021
+ STATUS_SXS_MANIFEST_TOO_BIG NTStatus = 0xC0150022
+ STATUS_SXS_SETTING_NOT_REGISTERED NTStatus = 0xC0150023
+ STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE NTStatus = 0xC0150024
+ STATUS_SMI_PRIMITIVE_INSTALLER_FAILED NTStatus = 0xC0150025
+ STATUS_GENERIC_COMMAND_FAILED NTStatus = 0xC0150026
+ STATUS_SXS_FILE_HASH_MISSING NTStatus = 0xC0150027
+ STATUS_CLUSTER_INVALID_NODE NTStatus = 0xC0130001
+ STATUS_CLUSTER_NODE_EXISTS NTStatus = 0xC0130002
+ STATUS_CLUSTER_JOIN_IN_PROGRESS NTStatus = 0xC0130003
+ STATUS_CLUSTER_NODE_NOT_FOUND NTStatus = 0xC0130004
+ STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND NTStatus = 0xC0130005
+ STATUS_CLUSTER_NETWORK_EXISTS NTStatus = 0xC0130006
+ STATUS_CLUSTER_NETWORK_NOT_FOUND NTStatus = 0xC0130007
+ STATUS_CLUSTER_NETINTERFACE_EXISTS NTStatus = 0xC0130008
+ STATUS_CLUSTER_NETINTERFACE_NOT_FOUND NTStatus = 0xC0130009
+ STATUS_CLUSTER_INVALID_REQUEST NTStatus = 0xC013000A
+ STATUS_CLUSTER_INVALID_NETWORK_PROVIDER NTStatus = 0xC013000B
+ STATUS_CLUSTER_NODE_DOWN NTStatus = 0xC013000C
+ STATUS_CLUSTER_NODE_UNREACHABLE NTStatus = 0xC013000D
+ STATUS_CLUSTER_NODE_NOT_MEMBER NTStatus = 0xC013000E
+ STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS NTStatus = 0xC013000F
+ STATUS_CLUSTER_INVALID_NETWORK NTStatus = 0xC0130010
+ STATUS_CLUSTER_NO_NET_ADAPTERS NTStatus = 0xC0130011
+ STATUS_CLUSTER_NODE_UP NTStatus = 0xC0130012
+ STATUS_CLUSTER_NODE_PAUSED NTStatus = 0xC0130013
+ STATUS_CLUSTER_NODE_NOT_PAUSED NTStatus = 0xC0130014
+ STATUS_CLUSTER_NO_SECURITY_CONTEXT NTStatus = 0xC0130015
+ STATUS_CLUSTER_NETWORK_NOT_INTERNAL NTStatus = 0xC0130016
+ STATUS_CLUSTER_POISONED NTStatus = 0xC0130017
+ STATUS_CLUSTER_NON_CSV_PATH NTStatus = 0xC0130018
+ STATUS_CLUSTER_CSV_VOLUME_NOT_LOCAL NTStatus = 0xC0130019
+ STATUS_CLUSTER_CSV_READ_OPLOCK_BREAK_IN_PROGRESS NTStatus = 0xC0130020
+ STATUS_CLUSTER_CSV_AUTO_PAUSE_ERROR NTStatus = 0xC0130021
+ STATUS_CLUSTER_CSV_REDIRECTED NTStatus = 0xC0130022
+ STATUS_CLUSTER_CSV_NOT_REDIRECTED NTStatus = 0xC0130023
+ STATUS_CLUSTER_CSV_VOLUME_DRAINING NTStatus = 0xC0130024
+ STATUS_CLUSTER_CSV_SNAPSHOT_CREATION_IN_PROGRESS NTStatus = 0xC0130025
+ STATUS_CLUSTER_CSV_VOLUME_DRAINING_SUCCEEDED_DOWNLEVEL NTStatus = 0xC0130026
+ STATUS_CLUSTER_CSV_NO_SNAPSHOTS NTStatus = 0xC0130027
+ STATUS_CSV_IO_PAUSE_TIMEOUT NTStatus = 0xC0130028
+ STATUS_CLUSTER_CSV_INVALID_HANDLE NTStatus = 0xC0130029
+ STATUS_CLUSTER_CSV_SUPPORTED_ONLY_ON_COORDINATOR NTStatus = 0xC0130030
+ STATUS_CLUSTER_CAM_TICKET_REPLAY_DETECTED NTStatus = 0xC0130031
+ STATUS_TRANSACTIONAL_CONFLICT NTStatus = 0xC0190001
+ STATUS_INVALID_TRANSACTION NTStatus = 0xC0190002
+ STATUS_TRANSACTION_NOT_ACTIVE NTStatus = 0xC0190003
+ STATUS_TM_INITIALIZATION_FAILED NTStatus = 0xC0190004
+ STATUS_RM_NOT_ACTIVE NTStatus = 0xC0190005
+ STATUS_RM_METADATA_CORRUPT NTStatus = 0xC0190006
+ STATUS_TRANSACTION_NOT_JOINED NTStatus = 0xC0190007
+ STATUS_DIRECTORY_NOT_RM NTStatus = 0xC0190008
+ STATUS_COULD_NOT_RESIZE_LOG NTStatus = 0x80190009
+ STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE NTStatus = 0xC019000A
+ STATUS_LOG_RESIZE_INVALID_SIZE NTStatus = 0xC019000B
+ STATUS_REMOTE_FILE_VERSION_MISMATCH NTStatus = 0xC019000C
+ STATUS_CRM_PROTOCOL_ALREADY_EXISTS NTStatus = 0xC019000F
+ STATUS_TRANSACTION_PROPAGATION_FAILED NTStatus = 0xC0190010
+ STATUS_CRM_PROTOCOL_NOT_FOUND NTStatus = 0xC0190011
+ STATUS_TRANSACTION_SUPERIOR_EXISTS NTStatus = 0xC0190012
+ STATUS_TRANSACTION_REQUEST_NOT_VALID NTStatus = 0xC0190013
+ STATUS_TRANSACTION_NOT_REQUESTED NTStatus = 0xC0190014
+ STATUS_TRANSACTION_ALREADY_ABORTED NTStatus = 0xC0190015
+ STATUS_TRANSACTION_ALREADY_COMMITTED NTStatus = 0xC0190016
+ STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER NTStatus = 0xC0190017
+ STATUS_CURRENT_TRANSACTION_NOT_VALID NTStatus = 0xC0190018
+ STATUS_LOG_GROWTH_FAILED NTStatus = 0xC0190019
+ STATUS_OBJECT_NO_LONGER_EXISTS NTStatus = 0xC0190021
+ STATUS_STREAM_MINIVERSION_NOT_FOUND NTStatus = 0xC0190022
+ STATUS_STREAM_MINIVERSION_NOT_VALID NTStatus = 0xC0190023
+ STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION NTStatus = 0xC0190024
+ STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT NTStatus = 0xC0190025
+ STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS NTStatus = 0xC0190026
+ STATUS_HANDLE_NO_LONGER_VALID NTStatus = 0xC0190028
+ STATUS_NO_TXF_METADATA NTStatus = 0x80190029
+ STATUS_LOG_CORRUPTION_DETECTED NTStatus = 0xC0190030
+ STATUS_CANT_RECOVER_WITH_HANDLE_OPEN NTStatus = 0x80190031
+ STATUS_RM_DISCONNECTED NTStatus = 0xC0190032
+ STATUS_ENLISTMENT_NOT_SUPERIOR NTStatus = 0xC0190033
+ STATUS_RECOVERY_NOT_NEEDED NTStatus = 0x40190034
+ STATUS_RM_ALREADY_STARTED NTStatus = 0x40190035
+ STATUS_FILE_IDENTITY_NOT_PERSISTENT NTStatus = 0xC0190036
+ STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY NTStatus = 0xC0190037
+ STATUS_CANT_CROSS_RM_BOUNDARY NTStatus = 0xC0190038
+ STATUS_TXF_DIR_NOT_EMPTY NTStatus = 0xC0190039
+ STATUS_INDOUBT_TRANSACTIONS_EXIST NTStatus = 0xC019003A
+ STATUS_TM_VOLATILE NTStatus = 0xC019003B
+ STATUS_ROLLBACK_TIMER_EXPIRED NTStatus = 0xC019003C
+ STATUS_TXF_ATTRIBUTE_CORRUPT NTStatus = 0xC019003D
+ STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION NTStatus = 0xC019003E
+ STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED NTStatus = 0xC019003F
+ STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE NTStatus = 0xC0190040
+ STATUS_TXF_METADATA_ALREADY_PRESENT NTStatus = 0x80190041
+ STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET NTStatus = 0x80190042
+ STATUS_TRANSACTION_REQUIRED_PROMOTION NTStatus = 0xC0190043
+ STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION NTStatus = 0xC0190044
+ STATUS_TRANSACTIONS_NOT_FROZEN NTStatus = 0xC0190045
+ STATUS_TRANSACTION_FREEZE_IN_PROGRESS NTStatus = 0xC0190046
+ STATUS_NOT_SNAPSHOT_VOLUME NTStatus = 0xC0190047
+ STATUS_NO_SAVEPOINT_WITH_OPEN_FILES NTStatus = 0xC0190048
+ STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION NTStatus = 0xC0190049
+ STATUS_TM_IDENTITY_MISMATCH NTStatus = 0xC019004A
+ STATUS_FLOATED_SECTION NTStatus = 0xC019004B
+ STATUS_CANNOT_ACCEPT_TRANSACTED_WORK NTStatus = 0xC019004C
+ STATUS_CANNOT_ABORT_TRANSACTIONS NTStatus = 0xC019004D
+ STATUS_TRANSACTION_NOT_FOUND NTStatus = 0xC019004E
+ STATUS_RESOURCEMANAGER_NOT_FOUND NTStatus = 0xC019004F
+ STATUS_ENLISTMENT_NOT_FOUND NTStatus = 0xC0190050
+ STATUS_TRANSACTIONMANAGER_NOT_FOUND NTStatus = 0xC0190051
+ STATUS_TRANSACTIONMANAGER_NOT_ONLINE NTStatus = 0xC0190052
+ STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION NTStatus = 0xC0190053
+ STATUS_TRANSACTION_NOT_ROOT NTStatus = 0xC0190054
+ STATUS_TRANSACTION_OBJECT_EXPIRED NTStatus = 0xC0190055
+ STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION NTStatus = 0xC0190056
+ STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED NTStatus = 0xC0190057
+ STATUS_TRANSACTION_RECORD_TOO_LONG NTStatus = 0xC0190058
+ STATUS_NO_LINK_TRACKING_IN_TRANSACTION NTStatus = 0xC0190059
+ STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION NTStatus = 0xC019005A
+ STATUS_TRANSACTION_INTEGRITY_VIOLATED NTStatus = 0xC019005B
+ STATUS_TRANSACTIONMANAGER_IDENTITY_MISMATCH NTStatus = 0xC019005C
+ STATUS_RM_CANNOT_BE_FROZEN_FOR_SNAPSHOT NTStatus = 0xC019005D
+ STATUS_TRANSACTION_MUST_WRITETHROUGH NTStatus = 0xC019005E
+ STATUS_TRANSACTION_NO_SUPERIOR NTStatus = 0xC019005F
+ STATUS_EXPIRED_HANDLE NTStatus = 0xC0190060
+ STATUS_TRANSACTION_NOT_ENLISTED NTStatus = 0xC0190061
+ STATUS_LOG_SECTOR_INVALID NTStatus = 0xC01A0001
+ STATUS_LOG_SECTOR_PARITY_INVALID NTStatus = 0xC01A0002
+ STATUS_LOG_SECTOR_REMAPPED NTStatus = 0xC01A0003
+ STATUS_LOG_BLOCK_INCOMPLETE NTStatus = 0xC01A0004
+ STATUS_LOG_INVALID_RANGE NTStatus = 0xC01A0005
+ STATUS_LOG_BLOCKS_EXHAUSTED NTStatus = 0xC01A0006
+ STATUS_LOG_READ_CONTEXT_INVALID NTStatus = 0xC01A0007
+ STATUS_LOG_RESTART_INVALID NTStatus = 0xC01A0008
+ STATUS_LOG_BLOCK_VERSION NTStatus = 0xC01A0009
+ STATUS_LOG_BLOCK_INVALID NTStatus = 0xC01A000A
+ STATUS_LOG_READ_MODE_INVALID NTStatus = 0xC01A000B
+ STATUS_LOG_NO_RESTART NTStatus = 0x401A000C
+ STATUS_LOG_METADATA_CORRUPT NTStatus = 0xC01A000D
+ STATUS_LOG_METADATA_INVALID NTStatus = 0xC01A000E
+ STATUS_LOG_METADATA_INCONSISTENT NTStatus = 0xC01A000F
+ STATUS_LOG_RESERVATION_INVALID NTStatus = 0xC01A0010
+ STATUS_LOG_CANT_DELETE NTStatus = 0xC01A0011
+ STATUS_LOG_CONTAINER_LIMIT_EXCEEDED NTStatus = 0xC01A0012
+ STATUS_LOG_START_OF_LOG NTStatus = 0xC01A0013
+ STATUS_LOG_POLICY_ALREADY_INSTALLED NTStatus = 0xC01A0014
+ STATUS_LOG_POLICY_NOT_INSTALLED NTStatus = 0xC01A0015
+ STATUS_LOG_POLICY_INVALID NTStatus = 0xC01A0016
+ STATUS_LOG_POLICY_CONFLICT NTStatus = 0xC01A0017
+ STATUS_LOG_PINNED_ARCHIVE_TAIL NTStatus = 0xC01A0018
+ STATUS_LOG_RECORD_NONEXISTENT NTStatus = 0xC01A0019
+ STATUS_LOG_RECORDS_RESERVED_INVALID NTStatus = 0xC01A001A
+ STATUS_LOG_SPACE_RESERVED_INVALID NTStatus = 0xC01A001B
+ STATUS_LOG_TAIL_INVALID NTStatus = 0xC01A001C
+ STATUS_LOG_FULL NTStatus = 0xC01A001D
+ STATUS_LOG_MULTIPLEXED NTStatus = 0xC01A001E
+ STATUS_LOG_DEDICATED NTStatus = 0xC01A001F
+ STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS NTStatus = 0xC01A0020
+ STATUS_LOG_ARCHIVE_IN_PROGRESS NTStatus = 0xC01A0021
+ STATUS_LOG_EPHEMERAL NTStatus = 0xC01A0022
+ STATUS_LOG_NOT_ENOUGH_CONTAINERS NTStatus = 0xC01A0023
+ STATUS_LOG_CLIENT_ALREADY_REGISTERED NTStatus = 0xC01A0024
+ STATUS_LOG_CLIENT_NOT_REGISTERED NTStatus = 0xC01A0025
+ STATUS_LOG_FULL_HANDLER_IN_PROGRESS NTStatus = 0xC01A0026
+ STATUS_LOG_CONTAINER_READ_FAILED NTStatus = 0xC01A0027
+ STATUS_LOG_CONTAINER_WRITE_FAILED NTStatus = 0xC01A0028
+ STATUS_LOG_CONTAINER_OPEN_FAILED NTStatus = 0xC01A0029
+ STATUS_LOG_CONTAINER_STATE_INVALID NTStatus = 0xC01A002A
+ STATUS_LOG_STATE_INVALID NTStatus = 0xC01A002B
+ STATUS_LOG_PINNED NTStatus = 0xC01A002C
+ STATUS_LOG_METADATA_FLUSH_FAILED NTStatus = 0xC01A002D
+ STATUS_LOG_INCONSISTENT_SECURITY NTStatus = 0xC01A002E
+ STATUS_LOG_APPENDED_FLUSH_FAILED NTStatus = 0xC01A002F
+ STATUS_LOG_PINNED_RESERVATION NTStatus = 0xC01A0030
+ STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD NTStatus = 0xC01B00EA
+ STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED NTStatus = 0x801B00EB
+ STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST NTStatus = 0x401B00EC
+ STATUS_MONITOR_NO_DESCRIPTOR NTStatus = 0xC01D0001
+ STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT NTStatus = 0xC01D0002
+ STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM NTStatus = 0xC01D0003
+ STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK NTStatus = 0xC01D0004
+ STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED NTStatus = 0xC01D0005
+ STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK NTStatus = 0xC01D0006
+ STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK NTStatus = 0xC01D0007
+ STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA NTStatus = 0xC01D0008
+ STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK NTStatus = 0xC01D0009
+ STATUS_MONITOR_INVALID_MANUFACTURE_DATE NTStatus = 0xC01D000A
+ STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER NTStatus = 0xC01E0000
+ STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER NTStatus = 0xC01E0001
+ STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER NTStatus = 0xC01E0002
+ STATUS_GRAPHICS_ADAPTER_WAS_RESET NTStatus = 0xC01E0003
+ STATUS_GRAPHICS_INVALID_DRIVER_MODEL NTStatus = 0xC01E0004
+ STATUS_GRAPHICS_PRESENT_MODE_CHANGED NTStatus = 0xC01E0005
+ STATUS_GRAPHICS_PRESENT_OCCLUDED NTStatus = 0xC01E0006
+ STATUS_GRAPHICS_PRESENT_DENIED NTStatus = 0xC01E0007
+ STATUS_GRAPHICS_CANNOTCOLORCONVERT NTStatus = 0xC01E0008
+ STATUS_GRAPHICS_DRIVER_MISMATCH NTStatus = 0xC01E0009
+ STATUS_GRAPHICS_PARTIAL_DATA_POPULATED NTStatus = 0x401E000A
+ STATUS_GRAPHICS_PRESENT_REDIRECTION_DISABLED NTStatus = 0xC01E000B
+ STATUS_GRAPHICS_PRESENT_UNOCCLUDED NTStatus = 0xC01E000C
+ STATUS_GRAPHICS_WINDOWDC_NOT_AVAILABLE NTStatus = 0xC01E000D
+ STATUS_GRAPHICS_WINDOWLESS_PRESENT_DISABLED NTStatus = 0xC01E000E
+ STATUS_GRAPHICS_PRESENT_INVALID_WINDOW NTStatus = 0xC01E000F
+ STATUS_GRAPHICS_PRESENT_BUFFER_NOT_BOUND NTStatus = 0xC01E0010
+ STATUS_GRAPHICS_VAIL_STATE_CHANGED NTStatus = 0xC01E0011
+ STATUS_GRAPHICS_INDIRECT_DISPLAY_ABANDON_SWAPCHAIN NTStatus = 0xC01E0012
+ STATUS_GRAPHICS_INDIRECT_DISPLAY_DEVICE_STOPPED NTStatus = 0xC01E0013
+ STATUS_GRAPHICS_NO_VIDEO_MEMORY NTStatus = 0xC01E0100
+ STATUS_GRAPHICS_CANT_LOCK_MEMORY NTStatus = 0xC01E0101
+ STATUS_GRAPHICS_ALLOCATION_BUSY NTStatus = 0xC01E0102
+ STATUS_GRAPHICS_TOO_MANY_REFERENCES NTStatus = 0xC01E0103
+ STATUS_GRAPHICS_TRY_AGAIN_LATER NTStatus = 0xC01E0104
+ STATUS_GRAPHICS_TRY_AGAIN_NOW NTStatus = 0xC01E0105
+ STATUS_GRAPHICS_ALLOCATION_INVALID NTStatus = 0xC01E0106
+ STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE NTStatus = 0xC01E0107
+ STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED NTStatus = 0xC01E0108
+ STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION NTStatus = 0xC01E0109
+ STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE NTStatus = 0xC01E0110
+ STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION NTStatus = 0xC01E0111
+ STATUS_GRAPHICS_ALLOCATION_CLOSED NTStatus = 0xC01E0112
+ STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE NTStatus = 0xC01E0113
+ STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE NTStatus = 0xC01E0114
+ STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE NTStatus = 0xC01E0115
+ STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST NTStatus = 0xC01E0116
+ STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE NTStatus = 0xC01E0200
+ STATUS_GRAPHICS_SKIP_ALLOCATION_PREPARATION NTStatus = 0x401E0201
+ STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY NTStatus = 0xC01E0300
+ STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED NTStatus = 0xC01E0301
+ STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED NTStatus = 0xC01E0302
+ STATUS_GRAPHICS_INVALID_VIDPN NTStatus = 0xC01E0303
+ STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE NTStatus = 0xC01E0304
+ STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET NTStatus = 0xC01E0305
+ STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED NTStatus = 0xC01E0306
+ STATUS_GRAPHICS_MODE_NOT_PINNED NTStatus = 0x401E0307
+ STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET NTStatus = 0xC01E0308
+ STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET NTStatus = 0xC01E0309
+ STATUS_GRAPHICS_INVALID_FREQUENCY NTStatus = 0xC01E030A
+ STATUS_GRAPHICS_INVALID_ACTIVE_REGION NTStatus = 0xC01E030B
+ STATUS_GRAPHICS_INVALID_TOTAL_REGION NTStatus = 0xC01E030C
+ STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE NTStatus = 0xC01E0310
+ STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE NTStatus = 0xC01E0311
+ STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET NTStatus = 0xC01E0312
+ STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY NTStatus = 0xC01E0313
+ STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET NTStatus = 0xC01E0314
+ STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET NTStatus = 0xC01E0315
+ STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET NTStatus = 0xC01E0316
+ STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET NTStatus = 0xC01E0317
+ STATUS_GRAPHICS_TARGET_ALREADY_IN_SET NTStatus = 0xC01E0318
+ STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH NTStatus = 0xC01E0319
+ STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY NTStatus = 0xC01E031A
+ STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET NTStatus = 0xC01E031B
+ STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE NTStatus = 0xC01E031C
+ STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET NTStatus = 0xC01E031D
+ STATUS_GRAPHICS_NO_PREFERRED_MODE NTStatus = 0x401E031E
+ STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET NTStatus = 0xC01E031F
+ STATUS_GRAPHICS_STALE_MODESET NTStatus = 0xC01E0320
+ STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET NTStatus = 0xC01E0321
+ STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE NTStatus = 0xC01E0322
+ STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN NTStatus = 0xC01E0323
+ STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0324
+ STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION NTStatus = 0xC01E0325
+ STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES NTStatus = 0xC01E0326
+ STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY NTStatus = 0xC01E0327
+ STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE NTStatus = 0xC01E0328
+ STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET NTStatus = 0xC01E0329
+ STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET NTStatus = 0xC01E032A
+ STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR NTStatus = 0xC01E032B
+ STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET NTStatus = 0xC01E032C
+ STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET NTStatus = 0xC01E032D
+ STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE NTStatus = 0xC01E032E
+ STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE NTStatus = 0xC01E032F
+ STATUS_GRAPHICS_RESOURCES_NOT_RELATED NTStatus = 0xC01E0330
+ STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0331
+ STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0332
+ STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET NTStatus = 0xC01E0333
+ STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER NTStatus = 0xC01E0334
+ STATUS_GRAPHICS_NO_VIDPNMGR NTStatus = 0xC01E0335
+ STATUS_GRAPHICS_NO_ACTIVE_VIDPN NTStatus = 0xC01E0336
+ STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY NTStatus = 0xC01E0337
+ STATUS_GRAPHICS_MONITOR_NOT_CONNECTED NTStatus = 0xC01E0338
+ STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY NTStatus = 0xC01E0339
+ STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE NTStatus = 0xC01E033A
+ STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE NTStatus = 0xC01E033B
+ STATUS_GRAPHICS_INVALID_STRIDE NTStatus = 0xC01E033C
+ STATUS_GRAPHICS_INVALID_PIXELFORMAT NTStatus = 0xC01E033D
+ STATUS_GRAPHICS_INVALID_COLORBASIS NTStatus = 0xC01E033E
+ STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE NTStatus = 0xC01E033F
+ STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY NTStatus = 0xC01E0340
+ STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT NTStatus = 0xC01E0341
+ STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE NTStatus = 0xC01E0342
+ STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN NTStatus = 0xC01E0343
+ STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL NTStatus = 0xC01E0344
+ STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION NTStatus = 0xC01E0345
+ STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED NTStatus = 0xC01E0346
+ STATUS_GRAPHICS_INVALID_GAMMA_RAMP NTStatus = 0xC01E0347
+ STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED NTStatus = 0xC01E0348
+ STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED NTStatus = 0xC01E0349
+ STATUS_GRAPHICS_MODE_NOT_IN_MODESET NTStatus = 0xC01E034A
+ STATUS_GRAPHICS_DATASET_IS_EMPTY NTStatus = 0x401E034B
+ STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET NTStatus = 0x401E034C
+ STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON NTStatus = 0xC01E034D
+ STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE NTStatus = 0xC01E034E
+ STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE NTStatus = 0xC01E034F
+ STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS NTStatus = 0xC01E0350
+ STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED NTStatus = 0x401E0351
+ STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING NTStatus = 0xC01E0352
+ STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED NTStatus = 0xC01E0353
+ STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS NTStatus = 0xC01E0354
+ STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT NTStatus = 0xC01E0355
+ STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM NTStatus = 0xC01E0356
+ STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN NTStatus = 0xC01E0357
+ STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT NTStatus = 0xC01E0358
+ STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED NTStatus = 0xC01E0359
+ STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION NTStatus = 0xC01E035A
+ STATUS_GRAPHICS_INVALID_CLIENT_TYPE NTStatus = 0xC01E035B
+ STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET NTStatus = 0xC01E035C
+ STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED NTStatus = 0xC01E0400
+ STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED NTStatus = 0xC01E0401
+ STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS NTStatus = 0x401E042F
+ STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER NTStatus = 0xC01E0430
+ STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED NTStatus = 0xC01E0431
+ STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED NTStatus = 0xC01E0432
+ STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY NTStatus = 0xC01E0433
+ STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED NTStatus = 0xC01E0434
+ STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON NTStatus = 0xC01E0435
+ STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE NTStatus = 0xC01E0436
+ STATUS_GRAPHICS_LEADLINK_START_DEFERRED NTStatus = 0x401E0437
+ STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER NTStatus = 0xC01E0438
+ STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY NTStatus = 0x401E0439
+ STATUS_GRAPHICS_START_DEFERRED NTStatus = 0x401E043A
+ STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED NTStatus = 0xC01E043B
+ STATUS_GRAPHICS_DEPENDABLE_CHILD_STATUS NTStatus = 0x401E043C
+ STATUS_GRAPHICS_OPM_NOT_SUPPORTED NTStatus = 0xC01E0500
+ STATUS_GRAPHICS_COPP_NOT_SUPPORTED NTStatus = 0xC01E0501
+ STATUS_GRAPHICS_UAB_NOT_SUPPORTED NTStatus = 0xC01E0502
+ STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS NTStatus = 0xC01E0503
+ STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST NTStatus = 0xC01E0505
+ STATUS_GRAPHICS_OPM_INTERNAL_ERROR NTStatus = 0xC01E050B
+ STATUS_GRAPHICS_OPM_INVALID_HANDLE NTStatus = 0xC01E050C
+ STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH NTStatus = 0xC01E050E
+ STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED NTStatus = 0xC01E050F
+ STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED NTStatus = 0xC01E0510
+ STATUS_GRAPHICS_PVP_HFS_FAILED NTStatus = 0xC01E0511
+ STATUS_GRAPHICS_OPM_INVALID_SRM NTStatus = 0xC01E0512
+ STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP NTStatus = 0xC01E0513
+ STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP NTStatus = 0xC01E0514
+ STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA NTStatus = 0xC01E0515
+ STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET NTStatus = 0xC01E0516
+ STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH NTStatus = 0xC01E0517
+ STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE NTStatus = 0xC01E0518
+ STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS NTStatus = 0xC01E051A
+ STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS NTStatus = 0xC01E051C
+ STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST NTStatus = 0xC01E051D
+ STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR NTStatus = 0xC01E051E
+ STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS NTStatus = 0xC01E051F
+ STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED NTStatus = 0xC01E0520
+ STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST NTStatus = 0xC01E0521
+ STATUS_GRAPHICS_I2C_NOT_SUPPORTED NTStatus = 0xC01E0580
+ STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST NTStatus = 0xC01E0581
+ STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA NTStatus = 0xC01E0582
+ STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA NTStatus = 0xC01E0583
+ STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED NTStatus = 0xC01E0584
+ STATUS_GRAPHICS_DDCCI_INVALID_DATA NTStatus = 0xC01E0585
+ STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE NTStatus = 0xC01E0586
+ STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING NTStatus = 0xC01E0587
+ STATUS_GRAPHICS_MCA_INTERNAL_ERROR NTStatus = 0xC01E0588
+ STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND NTStatus = 0xC01E0589
+ STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH NTStatus = 0xC01E058A
+ STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM NTStatus = 0xC01E058B
+ STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE NTStatus = 0xC01E058C
+ STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS NTStatus = 0xC01E058D
+ STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED NTStatus = 0xC01E05E0
+ STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME NTStatus = 0xC01E05E1
+ STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP NTStatus = 0xC01E05E2
+ STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED NTStatus = 0xC01E05E3
+ STATUS_GRAPHICS_INVALID_POINTER NTStatus = 0xC01E05E4
+ STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE NTStatus = 0xC01E05E5
+ STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL NTStatus = 0xC01E05E6
+ STATUS_GRAPHICS_INTERNAL_ERROR NTStatus = 0xC01E05E7
+ STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS NTStatus = 0xC01E05E8
+ STATUS_FVE_LOCKED_VOLUME NTStatus = 0xC0210000
+ STATUS_FVE_NOT_ENCRYPTED NTStatus = 0xC0210001
+ STATUS_FVE_BAD_INFORMATION NTStatus = 0xC0210002
+ STATUS_FVE_TOO_SMALL NTStatus = 0xC0210003
+ STATUS_FVE_FAILED_WRONG_FS NTStatus = 0xC0210004
+ STATUS_FVE_BAD_PARTITION_SIZE NTStatus = 0xC0210005
+ STATUS_FVE_FS_NOT_EXTENDED NTStatus = 0xC0210006
+ STATUS_FVE_FS_MOUNTED NTStatus = 0xC0210007
+ STATUS_FVE_NO_LICENSE NTStatus = 0xC0210008
+ STATUS_FVE_ACTION_NOT_ALLOWED NTStatus = 0xC0210009
+ STATUS_FVE_BAD_DATA NTStatus = 0xC021000A
+ STATUS_FVE_VOLUME_NOT_BOUND NTStatus = 0xC021000B
+ STATUS_FVE_NOT_DATA_VOLUME NTStatus = 0xC021000C
+ STATUS_FVE_CONV_READ_ERROR NTStatus = 0xC021000D
+ STATUS_FVE_CONV_WRITE_ERROR NTStatus = 0xC021000E
+ STATUS_FVE_OVERLAPPED_UPDATE NTStatus = 0xC021000F
+ STATUS_FVE_FAILED_SECTOR_SIZE NTStatus = 0xC0210010
+ STATUS_FVE_FAILED_AUTHENTICATION NTStatus = 0xC0210011
+ STATUS_FVE_NOT_OS_VOLUME NTStatus = 0xC0210012
+ STATUS_FVE_KEYFILE_NOT_FOUND NTStatus = 0xC0210013
+ STATUS_FVE_KEYFILE_INVALID NTStatus = 0xC0210014
+ STATUS_FVE_KEYFILE_NO_VMK NTStatus = 0xC0210015
+ STATUS_FVE_TPM_DISABLED NTStatus = 0xC0210016
+ STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO NTStatus = 0xC0210017
+	STATUS_FVE_TPM_INVALID_PCR NTStatus = 0xC0210018
+	STATUS_FVE_TPM_NO_VMK NTStatus = 0xC0210019
+	STATUS_FVE_PIN_INVALID NTStatus = 0xC021001A
+	STATUS_FVE_AUTH_INVALID_APPLICATION NTStatus = 0xC021001B
+	STATUS_FVE_AUTH_INVALID_CONFIG NTStatus = 0xC021001C
+	STATUS_FVE_DEBUGGER_ENABLED NTStatus = 0xC021001D
+	STATUS_FVE_DRY_RUN_FAILED NTStatus = 0xC021001E
+	STATUS_FVE_BAD_METADATA_POINTER NTStatus = 0xC021001F
+	STATUS_FVE_OLD_METADATA_COPY NTStatus = 0xC0210020
+	STATUS_FVE_REBOOT_REQUIRED NTStatus = 0xC0210021
+	STATUS_FVE_RAW_ACCESS NTStatus = 0xC0210022
+	STATUS_FVE_RAW_BLOCKED NTStatus = 0xC0210023
+	STATUS_FVE_NO_AUTOUNLOCK_MASTER_KEY NTStatus = 0xC0210024
+	STATUS_FVE_MOR_FAILED NTStatus = 0xC0210025
+	STATUS_FVE_NO_FEATURE_LICENSE NTStatus = 0xC0210026
+	STATUS_FVE_POLICY_USER_DISABLE_RDV_NOT_ALLOWED NTStatus = 0xC0210027
+	STATUS_FVE_CONV_RECOVERY_FAILED NTStatus = 0xC0210028
+	STATUS_FVE_VIRTUALIZED_SPACE_TOO_BIG NTStatus = 0xC0210029
+	STATUS_FVE_INVALID_DATUM_TYPE NTStatus = 0xC021002A
+	STATUS_FVE_VOLUME_TOO_SMALL NTStatus = 0xC0210030
+	STATUS_FVE_ENH_PIN_INVALID NTStatus = 0xC0210031
+	STATUS_FVE_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE NTStatus = 0xC0210032
+	STATUS_FVE_WIPE_NOT_ALLOWED_ON_TP_STORAGE NTStatus = 0xC0210033
+	STATUS_FVE_NOT_ALLOWED_ON_CSV_STACK NTStatus = 0xC0210034
+	STATUS_FVE_NOT_ALLOWED_ON_CLUSTER NTStatus = 0xC0210035
+	STATUS_FVE_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING NTStatus = 0xC0210036
+	STATUS_FVE_WIPE_CANCEL_NOT_APPLICABLE NTStatus = 0xC0210037
+	STATUS_FVE_EDRIVE_DRY_RUN_FAILED NTStatus = 0xC0210038
+	STATUS_FVE_SECUREBOOT_DISABLED NTStatus = 0xC0210039
+	STATUS_FVE_SECUREBOOT_CONFIG_CHANGE NTStatus = 0xC021003A
+	STATUS_FVE_DEVICE_LOCKEDOUT NTStatus = 0xC021003B
+	STATUS_FVE_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT NTStatus = 0xC021003C
+	STATUS_FVE_NOT_DE_VOLUME NTStatus = 0xC021003D
+	STATUS_FVE_PROTECTION_DISABLED NTStatus = 0xC021003E
+	STATUS_FVE_PROTECTION_CANNOT_BE_DISABLED NTStatus = 0xC021003F
+	STATUS_FVE_OSV_KSR_NOT_ALLOWED NTStatus = 0xC0210040
+	STATUS_FWP_CALLOUT_NOT_FOUND NTStatus = 0xC0220001
+	STATUS_FWP_CONDITION_NOT_FOUND NTStatus = 0xC0220002
+	STATUS_FWP_FILTER_NOT_FOUND NTStatus = 0xC0220003
+	STATUS_FWP_LAYER_NOT_FOUND NTStatus = 0xC0220004
+	STATUS_FWP_PROVIDER_NOT_FOUND NTStatus = 0xC0220005
+	STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND NTStatus = 0xC0220006
+	STATUS_FWP_SUBLAYER_NOT_FOUND NTStatus = 0xC0220007
+	STATUS_FWP_NOT_FOUND NTStatus = 0xC0220008
+	STATUS_FWP_ALREADY_EXISTS NTStatus = 0xC0220009
+	STATUS_FWP_IN_USE NTStatus = 0xC022000A
+	STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS NTStatus = 0xC022000B
+	STATUS_FWP_WRONG_SESSION NTStatus = 0xC022000C
+	STATUS_FWP_NO_TXN_IN_PROGRESS NTStatus = 0xC022000D
+	STATUS_FWP_TXN_IN_PROGRESS NTStatus = 0xC022000E
+	STATUS_FWP_TXN_ABORTED NTStatus = 0xC022000F
+	STATUS_FWP_SESSION_ABORTED NTStatus = 0xC0220010
+	STATUS_FWP_INCOMPATIBLE_TXN NTStatus = 0xC0220011
+	STATUS_FWP_TIMEOUT NTStatus = 0xC0220012
+	STATUS_FWP_NET_EVENTS_DISABLED NTStatus = 0xC0220013
+	STATUS_FWP_INCOMPATIBLE_LAYER NTStatus = 0xC0220014
+	STATUS_FWP_KM_CLIENTS_ONLY NTStatus = 0xC0220015
+	STATUS_FWP_LIFETIME_MISMATCH NTStatus = 0xC0220016
+	STATUS_FWP_BUILTIN_OBJECT NTStatus = 0xC0220017
+	STATUS_FWP_TOO_MANY_CALLOUTS NTStatus = 0xC0220018
+	STATUS_FWP_NOTIFICATION_DROPPED NTStatus = 0xC0220019
+	STATUS_FWP_TRAFFIC_MISMATCH NTStatus = 0xC022001A
+	STATUS_FWP_INCOMPATIBLE_SA_STATE NTStatus = 0xC022001B
+	STATUS_FWP_NULL_POINTER NTStatus = 0xC022001C
+	STATUS_FWP_INVALID_ENUMERATOR NTStatus = 0xC022001D
+	STATUS_FWP_INVALID_FLAGS NTStatus = 0xC022001E
+	STATUS_FWP_INVALID_NET_MASK NTStatus = 0xC022001F
+	STATUS_FWP_INVALID_RANGE NTStatus = 0xC0220020
+	STATUS_FWP_INVALID_INTERVAL NTStatus = 0xC0220021
+	STATUS_FWP_ZERO_LENGTH_ARRAY NTStatus = 0xC0220022
+	STATUS_FWP_NULL_DISPLAY_NAME NTStatus = 0xC0220023
+	STATUS_FWP_INVALID_ACTION_TYPE NTStatus = 0xC0220024
+	STATUS_FWP_INVALID_WEIGHT NTStatus = 0xC0220025
+	STATUS_FWP_MATCH_TYPE_MISMATCH NTStatus = 0xC0220026
+	STATUS_FWP_TYPE_MISMATCH NTStatus = 0xC0220027
+	STATUS_FWP_OUT_OF_BOUNDS NTStatus = 0xC0220028
+	STATUS_FWP_RESERVED NTStatus = 0xC0220029
+	STATUS_FWP_DUPLICATE_CONDITION NTStatus = 0xC022002A
+	STATUS_FWP_DUPLICATE_KEYMOD NTStatus = 0xC022002B
+	STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER NTStatus = 0xC022002C
+	STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER NTStatus = 0xC022002D
+	STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER NTStatus = 0xC022002E
+	STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT NTStatus = 0xC022002F
+	STATUS_FWP_INCOMPATIBLE_AUTH_METHOD NTStatus = 0xC0220030
+	STATUS_FWP_INCOMPATIBLE_DH_GROUP NTStatus = 0xC0220031
+	STATUS_FWP_EM_NOT_SUPPORTED NTStatus = 0xC0220032
+	STATUS_FWP_NEVER_MATCH NTStatus = 0xC0220033
+	STATUS_FWP_PROVIDER_CONTEXT_MISMATCH NTStatus = 0xC0220034
+	STATUS_FWP_INVALID_PARAMETER NTStatus = 0xC0220035
+	STATUS_FWP_TOO_MANY_SUBLAYERS NTStatus = 0xC0220036
+	STATUS_FWP_CALLOUT_NOTIFICATION_FAILED NTStatus = 0xC0220037
+	STATUS_FWP_INVALID_AUTH_TRANSFORM NTStatus = 0xC0220038
+	STATUS_FWP_INVALID_CIPHER_TRANSFORM NTStatus = 0xC0220039
+	STATUS_FWP_INCOMPATIBLE_CIPHER_TRANSFORM NTStatus = 0xC022003A
+	STATUS_FWP_INVALID_TRANSFORM_COMBINATION NTStatus = 0xC022003B
+	STATUS_FWP_DUPLICATE_AUTH_METHOD NTStatus = 0xC022003C
+	STATUS_FWP_INVALID_TUNNEL_ENDPOINT NTStatus = 0xC022003D
+	STATUS_FWP_L2_DRIVER_NOT_READY NTStatus = 0xC022003E
+	STATUS_FWP_KEY_DICTATOR_ALREADY_REGISTERED NTStatus = 0xC022003F
+	STATUS_FWP_KEY_DICTATION_INVALID_KEYING_MATERIAL NTStatus = 0xC0220040
+	STATUS_FWP_CONNECTIONS_DISABLED NTStatus = 0xC0220041
+	STATUS_FWP_INVALID_DNS_NAME NTStatus = 0xC0220042
+	STATUS_FWP_STILL_ON NTStatus = 0xC0220043
+	STATUS_FWP_IKEEXT_NOT_RUNNING NTStatus = 0xC0220044
+	STATUS_FWP_TCPIP_NOT_READY NTStatus = 0xC0220100
+	STATUS_FWP_INJECT_HANDLE_CLOSING NTStatus = 0xC0220101
+	STATUS_FWP_INJECT_HANDLE_STALE NTStatus = 0xC0220102
+	STATUS_FWP_CANNOT_PEND NTStatus = 0xC0220103
+	STATUS_FWP_DROP_NOICMP NTStatus = 0xC0220104
+	STATUS_NDIS_CLOSING NTStatus = 0xC0230002
+	STATUS_NDIS_BAD_VERSION NTStatus = 0xC0230004
+	STATUS_NDIS_BAD_CHARACTERISTICS NTStatus = 0xC0230005
+	STATUS_NDIS_ADAPTER_NOT_FOUND NTStatus = 0xC0230006
+	STATUS_NDIS_OPEN_FAILED NTStatus = 0xC0230007
+	STATUS_NDIS_DEVICE_FAILED NTStatus = 0xC0230008
+	STATUS_NDIS_MULTICAST_FULL NTStatus = 0xC0230009
+	STATUS_NDIS_MULTICAST_EXISTS NTStatus = 0xC023000A
+	STATUS_NDIS_MULTICAST_NOT_FOUND NTStatus = 0xC023000B
+	STATUS_NDIS_REQUEST_ABORTED NTStatus = 0xC023000C
+	STATUS_NDIS_RESET_IN_PROGRESS NTStatus = 0xC023000D
+	STATUS_NDIS_NOT_SUPPORTED NTStatus = 0xC02300BB
+	STATUS_NDIS_INVALID_PACKET NTStatus = 0xC023000F
+	STATUS_NDIS_ADAPTER_NOT_READY NTStatus = 0xC0230011
+	STATUS_NDIS_INVALID_LENGTH NTStatus = 0xC0230014
+	STATUS_NDIS_INVALID_DATA NTStatus = 0xC0230015
+	STATUS_NDIS_BUFFER_TOO_SHORT NTStatus = 0xC0230016
+	STATUS_NDIS_INVALID_OID NTStatus = 0xC0230017
+	STATUS_NDIS_ADAPTER_REMOVED NTStatus = 0xC0230018
+	STATUS_NDIS_UNSUPPORTED_MEDIA NTStatus = 0xC0230019
+	STATUS_NDIS_GROUP_ADDRESS_IN_USE NTStatus = 0xC023001A
+	STATUS_NDIS_FILE_NOT_FOUND NTStatus = 0xC023001B
+	STATUS_NDIS_ERROR_READING_FILE NTStatus = 0xC023001C
+	STATUS_NDIS_ALREADY_MAPPED NTStatus = 0xC023001D
+	STATUS_NDIS_RESOURCE_CONFLICT NTStatus = 0xC023001E
+	STATUS_NDIS_MEDIA_DISCONNECTED NTStatus = 0xC023001F
+	STATUS_NDIS_INVALID_ADDRESS NTStatus = 0xC0230022
+	STATUS_NDIS_INVALID_DEVICE_REQUEST NTStatus = 0xC0230010
+	STATUS_NDIS_PAUSED NTStatus = 0xC023002A
+	STATUS_NDIS_INTERFACE_NOT_FOUND NTStatus = 0xC023002B
+	STATUS_NDIS_UNSUPPORTED_REVISION NTStatus = 0xC023002C
+	STATUS_NDIS_INVALID_PORT NTStatus = 0xC023002D
+	STATUS_NDIS_INVALID_PORT_STATE NTStatus = 0xC023002E
+	STATUS_NDIS_LOW_POWER_STATE NTStatus = 0xC023002F
+	STATUS_NDIS_REINIT_REQUIRED NTStatus = 0xC0230030
+	STATUS_NDIS_NO_QUEUES NTStatus = 0xC0230031
+	STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED NTStatus = 0xC0232000
+	STATUS_NDIS_DOT11_MEDIA_IN_USE NTStatus = 0xC0232001
+	STATUS_NDIS_DOT11_POWER_STATE_INVALID NTStatus = 0xC0232002
+	STATUS_NDIS_PM_WOL_PATTERN_LIST_FULL NTStatus = 0xC0232003
+	STATUS_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL NTStatus = 0xC0232004
+	STATUS_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE NTStatus = 0xC0232005
+	STATUS_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE NTStatus = 0xC0232006
+	STATUS_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED NTStatus = 0xC0232007
+	STATUS_NDIS_DOT11_AP_BAND_NOT_ALLOWED NTStatus = 0xC0232008
+	STATUS_NDIS_INDICATION_REQUIRED NTStatus = 0x40230001
+	STATUS_NDIS_OFFLOAD_POLICY NTStatus = 0xC023100F
+	STATUS_NDIS_OFFLOAD_CONNECTION_REJECTED NTStatus = 0xC0231012
+	STATUS_NDIS_OFFLOAD_PATH_REJECTED NTStatus = 0xC0231013
+	STATUS_TPM_ERROR_MASK NTStatus = 0xC0290000
+	STATUS_TPM_AUTHFAIL NTStatus = 0xC0290001
+	STATUS_TPM_BADINDEX NTStatus = 0xC0290002
+	STATUS_TPM_BAD_PARAMETER NTStatus = 0xC0290003
+	STATUS_TPM_AUDITFAILURE NTStatus = 0xC0290004
+	STATUS_TPM_CLEAR_DISABLED NTStatus = 0xC0290005
+	STATUS_TPM_DEACTIVATED NTStatus = 0xC0290006
+	STATUS_TPM_DISABLED NTStatus = 0xC0290007
+	STATUS_TPM_DISABLED_CMD NTStatus = 0xC0290008
+	STATUS_TPM_FAIL NTStatus = 0xC0290009
+	STATUS_TPM_BAD_ORDINAL NTStatus = 0xC029000A
+	STATUS_TPM_INSTALL_DISABLED NTStatus = 0xC029000B
+	STATUS_TPM_INVALID_KEYHANDLE NTStatus = 0xC029000C
+	STATUS_TPM_KEYNOTFOUND NTStatus = 0xC029000D
+	STATUS_TPM_INAPPROPRIATE_ENC NTStatus = 0xC029000E
+	STATUS_TPM_MIGRATEFAIL NTStatus = 0xC029000F
+	STATUS_TPM_INVALID_PCR_INFO NTStatus = 0xC0290010
+	STATUS_TPM_NOSPACE NTStatus = 0xC0290011
+	STATUS_TPM_NOSRK NTStatus = 0xC0290012
+	STATUS_TPM_NOTSEALED_BLOB NTStatus = 0xC0290013
+	STATUS_TPM_OWNER_SET NTStatus = 0xC0290014
+	STATUS_TPM_RESOURCES NTStatus = 0xC0290015
+	STATUS_TPM_SHORTRANDOM NTStatus = 0xC0290016
+	STATUS_TPM_SIZE NTStatus = 0xC0290017
+	STATUS_TPM_WRONGPCRVAL NTStatus = 0xC0290018
+	STATUS_TPM_BAD_PARAM_SIZE NTStatus = 0xC0290019
+	STATUS_TPM_SHA_THREAD NTStatus = 0xC029001A
+	STATUS_TPM_SHA_ERROR NTStatus = 0xC029001B
+	STATUS_TPM_FAILEDSELFTEST NTStatus = 0xC029001C
+	STATUS_TPM_AUTH2FAIL NTStatus = 0xC029001D
+	STATUS_TPM_BADTAG NTStatus = 0xC029001E
+	STATUS_TPM_IOERROR NTStatus = 0xC029001F
+	STATUS_TPM_ENCRYPT_ERROR NTStatus = 0xC0290020
+	STATUS_TPM_DECRYPT_ERROR NTStatus = 0xC0290021
+	STATUS_TPM_INVALID_AUTHHANDLE NTStatus = 0xC0290022
+	STATUS_TPM_NO_ENDORSEMENT NTStatus = 0xC0290023
+	STATUS_TPM_INVALID_KEYUSAGE NTStatus = 0xC0290024
+	STATUS_TPM_WRONG_ENTITYTYPE NTStatus = 0xC0290025
+	STATUS_TPM_INVALID_POSTINIT NTStatus = 0xC0290026
+	STATUS_TPM_INAPPROPRIATE_SIG NTStatus = 0xC0290027
+	STATUS_TPM_BAD_KEY_PROPERTY NTStatus = 0xC0290028
+	STATUS_TPM_BAD_MIGRATION NTStatus = 0xC0290029
+	STATUS_TPM_BAD_SCHEME NTStatus = 0xC029002A
+	STATUS_TPM_BAD_DATASIZE NTStatus = 0xC029002B
+	STATUS_TPM_BAD_MODE NTStatus = 0xC029002C
+	STATUS_TPM_BAD_PRESENCE NTStatus = 0xC029002D
+	STATUS_TPM_BAD_VERSION NTStatus = 0xC029002E
+	STATUS_TPM_NO_WRAP_TRANSPORT NTStatus = 0xC029002F
+	STATUS_TPM_AUDITFAIL_UNSUCCESSFUL NTStatus = 0xC0290030
+	STATUS_TPM_AUDITFAIL_SUCCESSFUL NTStatus = 0xC0290031
+	STATUS_TPM_NOTRESETABLE NTStatus = 0xC0290032
+	STATUS_TPM_NOTLOCAL NTStatus = 0xC0290033
+	STATUS_TPM_BAD_TYPE NTStatus = 0xC0290034
+	STATUS_TPM_INVALID_RESOURCE NTStatus = 0xC0290035
+	STATUS_TPM_NOTFIPS NTStatus = 0xC0290036
+	STATUS_TPM_INVALID_FAMILY NTStatus = 0xC0290037
+	STATUS_TPM_NO_NV_PERMISSION NTStatus = 0xC0290038
+	STATUS_TPM_REQUIRES_SIGN NTStatus = 0xC0290039
+	STATUS_TPM_KEY_NOTSUPPORTED NTStatus = 0xC029003A
+	STATUS_TPM_AUTH_CONFLICT NTStatus = 0xC029003B
+	STATUS_TPM_AREA_LOCKED NTStatus = 0xC029003C
+	STATUS_TPM_BAD_LOCALITY NTStatus = 0xC029003D
+	STATUS_TPM_READ_ONLY NTStatus = 0xC029003E
+	STATUS_TPM_PER_NOWRITE NTStatus = 0xC029003F
+	STATUS_TPM_FAMILYCOUNT NTStatus = 0xC0290040
+	STATUS_TPM_WRITE_LOCKED NTStatus = 0xC0290041
+	STATUS_TPM_BAD_ATTRIBUTES NTStatus = 0xC0290042
+	STATUS_TPM_INVALID_STRUCTURE NTStatus = 0xC0290043
+	STATUS_TPM_KEY_OWNER_CONTROL NTStatus = 0xC0290044
+	STATUS_TPM_BAD_COUNTER NTStatus = 0xC0290045
+	STATUS_TPM_NOT_FULLWRITE NTStatus = 0xC0290046
+	STATUS_TPM_CONTEXT_GAP NTStatus = 0xC0290047
+	STATUS_TPM_MAXNVWRITES NTStatus = 0xC0290048
+	STATUS_TPM_NOOPERATOR NTStatus = 0xC0290049
+	STATUS_TPM_RESOURCEMISSING NTStatus = 0xC029004A
+	STATUS_TPM_DELEGATE_LOCK NTStatus = 0xC029004B
+	STATUS_TPM_DELEGATE_FAMILY NTStatus = 0xC029004C
+	STATUS_TPM_DELEGATE_ADMIN NTStatus = 0xC029004D
+	STATUS_TPM_TRANSPORT_NOTEXCLUSIVE NTStatus = 0xC029004E
+	STATUS_TPM_OWNER_CONTROL NTStatus = 0xC029004F
+	STATUS_TPM_DAA_RESOURCES NTStatus = 0xC0290050
+	STATUS_TPM_DAA_INPUT_DATA0 NTStatus = 0xC0290051
+	STATUS_TPM_DAA_INPUT_DATA1 NTStatus = 0xC0290052
+	STATUS_TPM_DAA_ISSUER_SETTINGS NTStatus = 0xC0290053
+	STATUS_TPM_DAA_TPM_SETTINGS NTStatus = 0xC0290054
+	STATUS_TPM_DAA_STAGE NTStatus = 0xC0290055
+	STATUS_TPM_DAA_ISSUER_VALIDITY NTStatus = 0xC0290056
+	STATUS_TPM_DAA_WRONG_W NTStatus = 0xC0290057
+	STATUS_TPM_BAD_HANDLE NTStatus = 0xC0290058
+	STATUS_TPM_BAD_DELEGATE NTStatus = 0xC0290059
+	STATUS_TPM_BADCONTEXT NTStatus = 0xC029005A
+	STATUS_TPM_TOOMANYCONTEXTS NTStatus = 0xC029005B
+	STATUS_TPM_MA_TICKET_SIGNATURE NTStatus = 0xC029005C
+	STATUS_TPM_MA_DESTINATION NTStatus = 0xC029005D
+	STATUS_TPM_MA_SOURCE NTStatus = 0xC029005E
+	STATUS_TPM_MA_AUTHORITY NTStatus = 0xC029005F
+	STATUS_TPM_PERMANENTEK NTStatus = 0xC0290061
+	STATUS_TPM_BAD_SIGNATURE NTStatus = 0xC0290062
+	STATUS_TPM_NOCONTEXTSPACE NTStatus = 0xC0290063
+	STATUS_TPM_20_E_ASYMMETRIC NTStatus = 0xC0290081
+	STATUS_TPM_20_E_ATTRIBUTES NTStatus = 0xC0290082
+	STATUS_TPM_20_E_HASH NTStatus = 0xC0290083
+	STATUS_TPM_20_E_VALUE NTStatus = 0xC0290084
+	STATUS_TPM_20_E_HIERARCHY NTStatus = 0xC0290085
+	STATUS_TPM_20_E_KEY_SIZE NTStatus = 0xC0290087
+	STATUS_TPM_20_E_MGF NTStatus = 0xC0290088
+	STATUS_TPM_20_E_MODE NTStatus = 0xC0290089
+	STATUS_TPM_20_E_TYPE NTStatus = 0xC029008A
+	STATUS_TPM_20_E_HANDLE NTStatus = 0xC029008B
+	STATUS_TPM_20_E_KDF NTStatus = 0xC029008C
+	STATUS_TPM_20_E_RANGE NTStatus = 0xC029008D
+	STATUS_TPM_20_E_AUTH_FAIL NTStatus = 0xC029008E
+	STATUS_TPM_20_E_NONCE NTStatus = 0xC029008F
+	STATUS_TPM_20_E_PP NTStatus = 0xC0290090
+	STATUS_TPM_20_E_SCHEME NTStatus = 0xC0290092
+	STATUS_TPM_20_E_SIZE NTStatus = 0xC0290095
+	STATUS_TPM_20_E_SYMMETRIC NTStatus = 0xC0290096
+	STATUS_TPM_20_E_TAG NTStatus = 0xC0290097
+	STATUS_TPM_20_E_SELECTOR NTStatus = 0xC0290098
+	STATUS_TPM_20_E_INSUFFICIENT NTStatus = 0xC029009A
+	STATUS_TPM_20_E_SIGNATURE NTStatus = 0xC029009B
+	STATUS_TPM_20_E_KEY NTStatus = 0xC029009C
+	STATUS_TPM_20_E_POLICY_FAIL NTStatus = 0xC029009D
+	STATUS_TPM_20_E_INTEGRITY NTStatus = 0xC029009F
+	STATUS_TPM_20_E_TICKET NTStatus = 0xC02900A0
+	STATUS_TPM_20_E_RESERVED_BITS NTStatus = 0xC02900A1
+	STATUS_TPM_20_E_BAD_AUTH NTStatus = 0xC02900A2
+	STATUS_TPM_20_E_EXPIRED NTStatus = 0xC02900A3
+	STATUS_TPM_20_E_POLICY_CC NTStatus = 0xC02900A4
+	STATUS_TPM_20_E_BINDING NTStatus = 0xC02900A5
+	STATUS_TPM_20_E_CURVE NTStatus = 0xC02900A6
+	STATUS_TPM_20_E_ECC_POINT NTStatus = 0xC02900A7
+	STATUS_TPM_20_E_INITIALIZE NTStatus = 0xC0290100
+	STATUS_TPM_20_E_FAILURE NTStatus = 0xC0290101
+	STATUS_TPM_20_E_SEQUENCE NTStatus = 0xC0290103
+	STATUS_TPM_20_E_PRIVATE NTStatus = 0xC029010B
+	STATUS_TPM_20_E_HMAC NTStatus = 0xC0290119
+	STATUS_TPM_20_E_DISABLED NTStatus = 0xC0290120
+	STATUS_TPM_20_E_EXCLUSIVE NTStatus = 0xC0290121
+	STATUS_TPM_20_E_ECC_CURVE NTStatus = 0xC0290123
+	STATUS_TPM_20_E_AUTH_TYPE NTStatus = 0xC0290124
+	STATUS_TPM_20_E_AUTH_MISSING NTStatus = 0xC0290125
+	STATUS_TPM_20_E_POLICY NTStatus = 0xC0290126
+	STATUS_TPM_20_E_PCR NTStatus = 0xC0290127
+	STATUS_TPM_20_E_PCR_CHANGED NTStatus = 0xC0290128
+	STATUS_TPM_20_E_UPGRADE NTStatus = 0xC029012D
+	STATUS_TPM_20_E_TOO_MANY_CONTEXTS NTStatus = 0xC029012E
+	STATUS_TPM_20_E_AUTH_UNAVAILABLE NTStatus = 0xC029012F
+	STATUS_TPM_20_E_REBOOT NTStatus = 0xC0290130
+	STATUS_TPM_20_E_UNBALANCED NTStatus = 0xC0290131
+	STATUS_TPM_20_E_COMMAND_SIZE NTStatus = 0xC0290142
+	STATUS_TPM_20_E_COMMAND_CODE NTStatus = 0xC0290143
+	STATUS_TPM_20_E_AUTHSIZE NTStatus = 0xC0290144
+	STATUS_TPM_20_E_AUTH_CONTEXT NTStatus = 0xC0290145
+	STATUS_TPM_20_E_NV_RANGE NTStatus = 0xC0290146
+	STATUS_TPM_20_E_NV_SIZE NTStatus = 0xC0290147
+	STATUS_TPM_20_E_NV_LOCKED NTStatus = 0xC0290148
+	STATUS_TPM_20_E_NV_AUTHORIZATION NTStatus = 0xC0290149
+	STATUS_TPM_20_E_NV_UNINITIALIZED NTStatus = 0xC029014A
+	STATUS_TPM_20_E_NV_SPACE NTStatus = 0xC029014B
+	STATUS_TPM_20_E_NV_DEFINED NTStatus = 0xC029014C
+	STATUS_TPM_20_E_BAD_CONTEXT NTStatus = 0xC0290150
+	STATUS_TPM_20_E_CPHASH NTStatus = 0xC0290151
+	STATUS_TPM_20_E_PARENT NTStatus = 0xC0290152
+	STATUS_TPM_20_E_NEEDS_TEST NTStatus = 0xC0290153
+	STATUS_TPM_20_E_NO_RESULT NTStatus = 0xC0290154
+	STATUS_TPM_20_E_SENSITIVE NTStatus = 0xC0290155
+	STATUS_TPM_COMMAND_BLOCKED NTStatus = 0xC0290400
+	STATUS_TPM_INVALID_HANDLE NTStatus = 0xC0290401
+	STATUS_TPM_DUPLICATE_VHANDLE NTStatus = 0xC0290402
+	STATUS_TPM_EMBEDDED_COMMAND_BLOCKED NTStatus = 0xC0290403
+	STATUS_TPM_EMBEDDED_COMMAND_UNSUPPORTED NTStatus = 0xC0290404
+	STATUS_TPM_RETRY NTStatus = 0xC0290800
+	STATUS_TPM_NEEDS_SELFTEST NTStatus = 0xC0290801
+	STATUS_TPM_DOING_SELFTEST NTStatus = 0xC0290802
+	STATUS_TPM_DEFEND_LOCK_RUNNING NTStatus = 0xC0290803
+	STATUS_TPM_COMMAND_CANCELED NTStatus = 0xC0291001
+	STATUS_TPM_TOO_MANY_CONTEXTS NTStatus = 0xC0291002
+	STATUS_TPM_NOT_FOUND NTStatus = 0xC0291003
+	STATUS_TPM_ACCESS_DENIED NTStatus = 0xC0291004
+	STATUS_TPM_INSUFFICIENT_BUFFER NTStatus = 0xC0291005
+	STATUS_TPM_PPI_FUNCTION_UNSUPPORTED NTStatus = 0xC0291006
+	STATUS_PCP_ERROR_MASK NTStatus = 0xC0292000
+	STATUS_PCP_DEVICE_NOT_READY NTStatus = 0xC0292001
+	STATUS_PCP_INVALID_HANDLE NTStatus = 0xC0292002
+	STATUS_PCP_INVALID_PARAMETER NTStatus = 0xC0292003
+	STATUS_PCP_FLAG_NOT_SUPPORTED NTStatus = 0xC0292004
+	STATUS_PCP_NOT_SUPPORTED NTStatus = 0xC0292005
+	STATUS_PCP_BUFFER_TOO_SMALL NTStatus = 0xC0292006
+	STATUS_PCP_INTERNAL_ERROR NTStatus = 0xC0292007
+	STATUS_PCP_AUTHENTICATION_FAILED NTStatus = 0xC0292008
+	STATUS_PCP_AUTHENTICATION_IGNORED NTStatus = 0xC0292009
+	STATUS_PCP_POLICY_NOT_FOUND NTStatus = 0xC029200A
+	STATUS_PCP_PROFILE_NOT_FOUND NTStatus = 0xC029200B
+	STATUS_PCP_VALIDATION_FAILED NTStatus = 0xC029200C
+	STATUS_PCP_DEVICE_NOT_FOUND NTStatus = 0xC029200D
+	STATUS_PCP_WRONG_PARENT NTStatus = 0xC029200E
+	STATUS_PCP_KEY_NOT_LOADED NTStatus = 0xC029200F
+	STATUS_PCP_NO_KEY_CERTIFICATION NTStatus = 0xC0292010
+	STATUS_PCP_KEY_NOT_FINALIZED NTStatus = 0xC0292011
+	STATUS_PCP_ATTESTATION_CHALLENGE_NOT_SET NTStatus = 0xC0292012
+	STATUS_PCP_NOT_PCR_BOUND NTStatus = 0xC0292013
+	STATUS_PCP_KEY_ALREADY_FINALIZED NTStatus = 0xC0292014
+	STATUS_PCP_KEY_USAGE_POLICY_NOT_SUPPORTED NTStatus = 0xC0292015
+	STATUS_PCP_KEY_USAGE_POLICY_INVALID NTStatus = 0xC0292016
+	STATUS_PCP_SOFT_KEY_ERROR NTStatus = 0xC0292017
+	STATUS_PCP_KEY_NOT_AUTHENTICATED NTStatus = 0xC0292018
+	STATUS_PCP_KEY_NOT_AIK NTStatus = 0xC0292019
+	STATUS_PCP_KEY_NOT_SIGNING_KEY NTStatus = 0xC029201A
+	STATUS_PCP_LOCKED_OUT NTStatus = 0xC029201B
+	STATUS_PCP_CLAIM_TYPE_NOT_SUPPORTED NTStatus = 0xC029201C
+	STATUS_PCP_TPM_VERSION_NOT_SUPPORTED NTStatus = 0xC029201D
+	STATUS_PCP_BUFFER_LENGTH_MISMATCH NTStatus = 0xC029201E
+	STATUS_PCP_IFX_RSA_KEY_CREATION_BLOCKED NTStatus = 0xC029201F
+	STATUS_PCP_TICKET_MISSING NTStatus = 0xC0292020
+	STATUS_PCP_RAW_POLICY_NOT_SUPPORTED NTStatus = 0xC0292021
+	STATUS_PCP_KEY_HANDLE_INVALIDATED NTStatus = 0xC0292022
+	STATUS_PCP_UNSUPPORTED_PSS_SALT NTStatus = 0x40292023
+	STATUS_RTPM_CONTEXT_CONTINUE NTStatus = 0x00293000
+	STATUS_RTPM_CONTEXT_COMPLETE NTStatus = 0x00293001
+	STATUS_RTPM_NO_RESULT NTStatus = 0xC0293002
+	STATUS_RTPM_PCR_READ_INCOMPLETE NTStatus = 0xC0293003
+	STATUS_RTPM_INVALID_CONTEXT NTStatus = 0xC0293004
+	STATUS_RTPM_UNSUPPORTED_CMD NTStatus = 0xC0293005
+	STATUS_TPM_ZERO_EXHAUST_ENABLED NTStatus = 0xC0294000
+	STATUS_HV_INVALID_HYPERCALL_CODE NTStatus = 0xC0350002
+	STATUS_HV_INVALID_HYPERCALL_INPUT NTStatus = 0xC0350003
+	STATUS_HV_INVALID_ALIGNMENT NTStatus = 0xC0350004
+	STATUS_HV_INVALID_PARAMETER NTStatus = 0xC0350005
+	STATUS_HV_ACCESS_DENIED NTStatus = 0xC0350006
+	STATUS_HV_INVALID_PARTITION_STATE NTStatus = 0xC0350007
+	STATUS_HV_OPERATION_DENIED NTStatus = 0xC0350008
+	STATUS_HV_UNKNOWN_PROPERTY NTStatus = 0xC0350009
+	STATUS_HV_PROPERTY_VALUE_OUT_OF_RANGE NTStatus = 0xC035000A
+	STATUS_HV_INSUFFICIENT_MEMORY NTStatus = 0xC035000B
+	STATUS_HV_PARTITION_TOO_DEEP NTStatus = 0xC035000C
+	STATUS_HV_INVALID_PARTITION_ID NTStatus = 0xC035000D
+	STATUS_HV_INVALID_VP_INDEX NTStatus = 0xC035000E
+	STATUS_HV_INVALID_PORT_ID NTStatus = 0xC0350011
+	STATUS_HV_INVALID_CONNECTION_ID NTStatus = 0xC0350012
+	STATUS_HV_INSUFFICIENT_BUFFERS NTStatus = 0xC0350013
+	STATUS_HV_NOT_ACKNOWLEDGED NTStatus = 0xC0350014
+	STATUS_HV_INVALID_VP_STATE NTStatus = 0xC0350015
+	STATUS_HV_ACKNOWLEDGED NTStatus = 0xC0350016
+	STATUS_HV_INVALID_SAVE_RESTORE_STATE NTStatus = 0xC0350017
+	STATUS_HV_INVALID_SYNIC_STATE NTStatus = 0xC0350018
+	STATUS_HV_OBJECT_IN_USE NTStatus = 0xC0350019
+	STATUS_HV_INVALID_PROXIMITY_DOMAIN_INFO NTStatus = 0xC035001A
+	STATUS_HV_NO_DATA NTStatus = 0xC035001B
+	STATUS_HV_INACTIVE NTStatus = 0xC035001C
+	STATUS_HV_NO_RESOURCES NTStatus = 0xC035001D
+	STATUS_HV_FEATURE_UNAVAILABLE NTStatus = 0xC035001E
+	STATUS_HV_INSUFFICIENT_BUFFER NTStatus = 0xC0350033
+	STATUS_HV_INSUFFICIENT_DEVICE_DOMAINS NTStatus = 0xC0350038
+	STATUS_HV_CPUID_FEATURE_VALIDATION_ERROR NTStatus = 0xC035003C
+	STATUS_HV_CPUID_XSAVE_FEATURE_VALIDATION_ERROR NTStatus = 0xC035003D
+	STATUS_HV_PROCESSOR_STARTUP_TIMEOUT NTStatus = 0xC035003E
+	STATUS_HV_SMX_ENABLED NTStatus = 0xC035003F
+	STATUS_HV_INVALID_LP_INDEX NTStatus = 0xC0350041
+	STATUS_HV_INVALID_REGISTER_VALUE NTStatus = 0xC0350050
+	STATUS_HV_INVALID_VTL_STATE NTStatus = 0xC0350051
+	STATUS_HV_NX_NOT_DETECTED NTStatus = 0xC0350055
+	STATUS_HV_INVALID_DEVICE_ID NTStatus = 0xC0350057
+	STATUS_HV_INVALID_DEVICE_STATE NTStatus = 0xC0350058
+	STATUS_HV_PENDING_PAGE_REQUESTS NTStatus = 0x00350059
+	STATUS_HV_PAGE_REQUEST_INVALID NTStatus = 0xC0350060
+	STATUS_HV_INVALID_CPU_GROUP_ID NTStatus = 0xC035006F
+	STATUS_HV_INVALID_CPU_GROUP_STATE NTStatus = 0xC0350070
+	STATUS_HV_OPERATION_FAILED NTStatus = 0xC0350071
+	STATUS_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE NTStatus = 0xC0350072
+	STATUS_HV_INSUFFICIENT_ROOT_MEMORY NTStatus = 0xC0350073
+	STATUS_HV_NOT_PRESENT NTStatus = 0xC0351000
+	STATUS_VID_DUPLICATE_HANDLER NTStatus = 0xC0370001
+	STATUS_VID_TOO_MANY_HANDLERS NTStatus = 0xC0370002
+	STATUS_VID_QUEUE_FULL NTStatus = 0xC0370003
+	STATUS_VID_HANDLER_NOT_PRESENT NTStatus = 0xC0370004
+	STATUS_VID_INVALID_OBJECT_NAME NTStatus = 0xC0370005
+	STATUS_VID_PARTITION_NAME_TOO_LONG NTStatus = 0xC0370006
+	STATUS_VID_MESSAGE_QUEUE_NAME_TOO_LONG NTStatus = 0xC0370007
+	STATUS_VID_PARTITION_ALREADY_EXISTS NTStatus = 0xC0370008
+	STATUS_VID_PARTITION_DOES_NOT_EXIST NTStatus = 0xC0370009
+	STATUS_VID_PARTITION_NAME_NOT_FOUND NTStatus = 0xC037000A
+	STATUS_VID_MESSAGE_QUEUE_ALREADY_EXISTS NTStatus = 0xC037000B
+	STATUS_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT NTStatus = 0xC037000C
+	STATUS_VID_MB_STILL_REFERENCED NTStatus = 0xC037000D
+	STATUS_VID_CHILD_GPA_PAGE_SET_CORRUPTED NTStatus = 0xC037000E
+	STATUS_VID_INVALID_NUMA_SETTINGS NTStatus = 0xC037000F
+	STATUS_VID_INVALID_NUMA_NODE_INDEX NTStatus = 0xC0370010
+	STATUS_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED NTStatus = 0xC0370011
+	STATUS_VID_INVALID_MEMORY_BLOCK_HANDLE NTStatus = 0xC0370012
+	STATUS_VID_PAGE_RANGE_OVERFLOW NTStatus = 0xC0370013
+	STATUS_VID_INVALID_MESSAGE_QUEUE_HANDLE NTStatus = 0xC0370014
+	STATUS_VID_INVALID_GPA_RANGE_HANDLE NTStatus = 0xC0370015
+	STATUS_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE NTStatus = 0xC0370016
+	STATUS_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED NTStatus = 0xC0370017
+	STATUS_VID_INVALID_PPM_HANDLE NTStatus = 0xC0370018
+	STATUS_VID_MBPS_ARE_LOCKED NTStatus = 0xC0370019
+	STATUS_VID_MESSAGE_QUEUE_CLOSED NTStatus = 0xC037001A
+	STATUS_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED NTStatus = 0xC037001B
+	STATUS_VID_STOP_PENDING NTStatus = 0xC037001C
+	STATUS_VID_INVALID_PROCESSOR_STATE NTStatus = 0xC037001D
+	STATUS_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT NTStatus = 0xC037001E
+	STATUS_VID_KM_INTERFACE_ALREADY_INITIALIZED NTStatus = 0xC037001F
+	STATUS_VID_MB_PROPERTY_ALREADY_SET_RESET NTStatus = 0xC0370020
+	STATUS_VID_MMIO_RANGE_DESTROYED NTStatus = 0xC0370021
+	STATUS_VID_INVALID_CHILD_GPA_PAGE_SET NTStatus = 0xC0370022
+	STATUS_VID_RESERVE_PAGE_SET_IS_BEING_USED NTStatus = 0xC0370023
+	STATUS_VID_RESERVE_PAGE_SET_TOO_SMALL NTStatus = 0xC0370024
+	STATUS_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE NTStatus = 0xC0370025
+	STATUS_VID_MBP_COUNT_EXCEEDED_LIMIT NTStatus = 0xC0370026
+	STATUS_VID_SAVED_STATE_CORRUPT NTStatus = 0xC0370027
+	STATUS_VID_SAVED_STATE_UNRECOGNIZED_ITEM NTStatus = 0xC0370028
+	STATUS_VID_SAVED_STATE_INCOMPATIBLE NTStatus = 0xC0370029
+	STATUS_VID_VTL_ACCESS_DENIED NTStatus = 0xC037002A
+	STATUS_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED NTStatus = 0x80370001
+	STATUS_IPSEC_BAD_SPI NTStatus = 0xC0360001
+	STATUS_IPSEC_SA_LIFETIME_EXPIRED NTStatus = 0xC0360002
+	STATUS_IPSEC_WRONG_SA NTStatus = 0xC0360003
+	STATUS_IPSEC_REPLAY_CHECK_FAILED NTStatus = 0xC0360004
+	STATUS_IPSEC_INVALID_PACKET NTStatus = 0xC0360005
+	STATUS_IPSEC_INTEGRITY_CHECK_FAILED NTStatus = 0xC0360006
+	STATUS_IPSEC_CLEAR_TEXT_DROP NTStatus = 0xC0360007
+	STATUS_IPSEC_AUTH_FIREWALL_DROP NTStatus = 0xC0360008
+	STATUS_IPSEC_THROTTLE_DROP NTStatus = 0xC0360009
+	STATUS_IPSEC_DOSP_BLOCK NTStatus = 0xC0368000
+	STATUS_IPSEC_DOSP_RECEIVED_MULTICAST NTStatus = 0xC0368001
+	STATUS_IPSEC_DOSP_INVALID_PACKET NTStatus = 0xC0368002
+	STATUS_IPSEC_DOSP_STATE_LOOKUP_FAILED NTStatus = 0xC0368003
+	STATUS_IPSEC_DOSP_MAX_ENTRIES NTStatus = 0xC0368004
+	STATUS_IPSEC_DOSP_KEYMOD_NOT_ALLOWED NTStatus = 0xC0368005
+	STATUS_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES NTStatus = 0xC0368006
+	STATUS_VOLMGR_INCOMPLETE_REGENERATION NTStatus = 0x80380001
+	STATUS_VOLMGR_INCOMPLETE_DISK_MIGRATION NTStatus = 0x80380002
+	STATUS_VOLMGR_DATABASE_FULL NTStatus = 0xC0380001
+	STATUS_VOLMGR_DISK_CONFIGURATION_CORRUPTED NTStatus = 0xC0380002
+	STATUS_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC NTStatus = 0xC0380003
+	STATUS_VOLMGR_PACK_CONFIG_UPDATE_FAILED NTStatus = 0xC0380004
+	STATUS_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME NTStatus = 0xC0380005
+	STATUS_VOLMGR_DISK_DUPLICATE NTStatus = 0xC0380006
+	STATUS_VOLMGR_DISK_DYNAMIC NTStatus = 0xC0380007
+	STATUS_VOLMGR_DISK_ID_INVALID NTStatus = 0xC0380008
+	STATUS_VOLMGR_DISK_INVALID NTStatus = 0xC0380009
+	STATUS_VOLMGR_DISK_LAST_VOTER NTStatus = 0xC038000A
+	STATUS_VOLMGR_DISK_LAYOUT_INVALID NTStatus = 0xC038000B
+	STATUS_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS NTStatus = 0xC038000C
+	STATUS_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED NTStatus = 0xC038000D
+	STATUS_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL NTStatus = 0xC038000E
+	STATUS_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS NTStatus = 0xC038000F
+	STATUS_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS NTStatus = 0xC0380010
+	STATUS_VOLMGR_DISK_MISSING NTStatus = 0xC0380011
+	STATUS_VOLMGR_DISK_NOT_EMPTY NTStatus = 0xC0380012
+	STATUS_VOLMGR_DISK_NOT_ENOUGH_SPACE NTStatus = 0xC0380013
+	STATUS_VOLMGR_DISK_REVECTORING_FAILED NTStatus = 0xC0380014
+	STATUS_VOLMGR_DISK_SECTOR_SIZE_INVALID NTStatus = 0xC0380015
+	STATUS_VOLMGR_DISK_SET_NOT_CONTAINED NTStatus = 0xC0380016
+	STATUS_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS NTStatus = 0xC0380017
+	STATUS_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES NTStatus = 0xC0380018
+	STATUS_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED NTStatus = 0xC0380019
+	STATUS_VOLMGR_EXTENT_ALREADY_USED NTStatus = 0xC038001A
+	STATUS_VOLMGR_EXTENT_NOT_CONTIGUOUS NTStatus = 0xC038001B
+	STATUS_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION NTStatus = 0xC038001C
+	STATUS_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED NTStatus = 0xC038001D
+	STATUS_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION NTStatus = 0xC038001E
+	STATUS_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH NTStatus = 0xC038001F
+	STATUS_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED NTStatus = 0xC0380020
+	STATUS_VOLMGR_INTERLEAVE_LENGTH_INVALID NTStatus = 0xC0380021
+	STATUS_VOLMGR_MAXIMUM_REGISTERED_USERS NTStatus = 0xC0380022
+	STATUS_VOLMGR_MEMBER_IN_SYNC NTStatus = 0xC0380023
+	STATUS_VOLMGR_MEMBER_INDEX_DUPLICATE NTStatus = 0xC0380024
+	STATUS_VOLMGR_MEMBER_INDEX_INVALID NTStatus = 0xC0380025
+	STATUS_VOLMGR_MEMBER_MISSING NTStatus = 0xC0380026
+	STATUS_VOLMGR_MEMBER_NOT_DETACHED NTStatus = 0xC0380027
+	STATUS_VOLMGR_MEMBER_REGENERATING NTStatus = 0xC0380028
+	STATUS_VOLMGR_ALL_DISKS_FAILED NTStatus = 0xC0380029
+	STATUS_VOLMGR_NO_REGISTERED_USERS NTStatus = 0xC038002A
+	STATUS_VOLMGR_NO_SUCH_USER NTStatus = 0xC038002B
+	STATUS_VOLMGR_NOTIFICATION_RESET NTStatus = 0xC038002C
+	STATUS_VOLMGR_NUMBER_OF_MEMBERS_INVALID NTStatus = 0xC038002D
+	STATUS_VOLMGR_NUMBER_OF_PLEXES_INVALID NTStatus = 0xC038002E
+	STATUS_VOLMGR_PACK_DUPLICATE NTStatus = 0xC038002F
+	STATUS_VOLMGR_PACK_ID_INVALID NTStatus = 0xC0380030
+	STATUS_VOLMGR_PACK_INVALID NTStatus = 0xC0380031
+	STATUS_VOLMGR_PACK_NAME_INVALID NTStatus = 0xC0380032
+	STATUS_VOLMGR_PACK_OFFLINE NTStatus = 0xC0380033
+	STATUS_VOLMGR_PACK_HAS_QUORUM NTStatus = 0xC0380034
+	STATUS_VOLMGR_PACK_WITHOUT_QUORUM NTStatus = 0xC0380035
+	STATUS_VOLMGR_PARTITION_STYLE_INVALID NTStatus = 0xC0380036
+	STATUS_VOLMGR_PARTITION_UPDATE_FAILED NTStatus = 0xC0380037
+	STATUS_VOLMGR_PLEX_IN_SYNC NTStatus = 0xC0380038
+	STATUS_VOLMGR_PLEX_INDEX_DUPLICATE NTStatus = 0xC0380039
+	STATUS_VOLMGR_PLEX_INDEX_INVALID NTStatus = 0xC038003A
+	STATUS_VOLMGR_PLEX_LAST_ACTIVE NTStatus = 0xC038003B
+	STATUS_VOLMGR_PLEX_MISSING NTStatus = 0xC038003C
+	STATUS_VOLMGR_PLEX_REGENERATING NTStatus = 0xC038003D
+	STATUS_VOLMGR_PLEX_TYPE_INVALID NTStatus = 0xC038003E
+	STATUS_VOLMGR_PLEX_NOT_RAID5 NTStatus = 0xC038003F
+	STATUS_VOLMGR_PLEX_NOT_SIMPLE NTStatus = 0xC0380040
+	STATUS_VOLMGR_STRUCTURE_SIZE_INVALID NTStatus = 0xC0380041
+	STATUS_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS NTStatus = 0xC0380042
+	STATUS_VOLMGR_TRANSACTION_IN_PROGRESS NTStatus = 0xC0380043
+	STATUS_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE NTStatus = 0xC0380044
+	STATUS_VOLMGR_VOLUME_CONTAINS_MISSING_DISK NTStatus = 0xC0380045
+	STATUS_VOLMGR_VOLUME_ID_INVALID NTStatus = 0xC0380046
+	STATUS_VOLMGR_VOLUME_LENGTH_INVALID NTStatus = 0xC0380047
+	STATUS_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE NTStatus = 0xC0380048
+	STATUS_VOLMGR_VOLUME_NOT_MIRRORED NTStatus = 0xC0380049
+	STATUS_VOLMGR_VOLUME_NOT_RETAINED NTStatus = 0xC038004A
+	STATUS_VOLMGR_VOLUME_OFFLINE NTStatus = 0xC038004B
+	STATUS_VOLMGR_VOLUME_RETAINED NTStatus = 0xC038004C
+	STATUS_VOLMGR_NUMBER_OF_EXTENTS_INVALID NTStatus = 0xC038004D
+	STATUS_VOLMGR_DIFFERENT_SECTOR_SIZE NTStatus = 0xC038004E
+	STATUS_VOLMGR_BAD_BOOT_DISK NTStatus = 0xC038004F
+	STATUS_VOLMGR_PACK_CONFIG_OFFLINE NTStatus = 0xC0380050
+	STATUS_VOLMGR_PACK_CONFIG_ONLINE NTStatus = 0xC0380051
+	STATUS_VOLMGR_NOT_PRIMARY_PACK NTStatus = 0xC0380052
+	STATUS_VOLMGR_PACK_LOG_UPDATE_FAILED NTStatus = 0xC0380053
+	STATUS_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID NTStatus = 0xC0380054
+	STATUS_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID NTStatus = 0xC0380055
+	STATUS_VOLMGR_VOLUME_MIRRORED NTStatus = 0xC0380056
+	STATUS_VOLMGR_PLEX_NOT_SIMPLE_SPANNED NTStatus = 0xC0380057
+	STATUS_VOLMGR_NO_VALID_LOG_COPIES NTStatus = 0xC0380058
+	STATUS_VOLMGR_PRIMARY_PACK_PRESENT NTStatus = 0xC0380059
+	STATUS_VOLMGR_NUMBER_OF_DISKS_INVALID NTStatus = 0xC038005A
+	STATUS_VOLMGR_MIRROR_NOT_SUPPORTED NTStatus = 0xC038005B
+	STATUS_VOLMGR_RAID5_NOT_SUPPORTED NTStatus = 0xC038005C
+	STATUS_BCD_NOT_ALL_ENTRIES_IMPORTED NTStatus = 0x80390001
+	STATUS_BCD_TOO_MANY_ELEMENTS NTStatus = 0xC0390002
+	STATUS_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED NTStatus = 0x80390003
+	STATUS_VHD_DRIVE_FOOTER_MISSING NTStatus = 0xC03A0001
+	STATUS_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH NTStatus = 0xC03A0002
+	STATUS_VHD_DRIVE_FOOTER_CORRUPT NTStatus = 0xC03A0003
+	STATUS_VHD_FORMAT_UNKNOWN NTStatus = 0xC03A0004
+	STATUS_VHD_FORMAT_UNSUPPORTED_VERSION NTStatus = 0xC03A0005
+	STATUS_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH NTStatus = 0xC03A0006
+	STATUS_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION NTStatus = 0xC03A0007
+	STATUS_VHD_SPARSE_HEADER_CORRUPT NTStatus = 0xC03A0008
+	STATUS_VHD_BLOCK_ALLOCATION_FAILURE NTStatus = 0xC03A0009
+	STATUS_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT NTStatus = 0xC03A000A
+	STATUS_VHD_INVALID_BLOCK_SIZE NTStatus = 0xC03A000B
+	STATUS_VHD_BITMAP_MISMATCH NTStatus = 0xC03A000C
+	STATUS_VHD_PARENT_VHD_NOT_FOUND NTStatus = 0xC03A000D
+	STATUS_VHD_CHILD_PARENT_ID_MISMATCH NTStatus = 0xC03A000E
+	STATUS_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH NTStatus = 0xC03A000F
+	STATUS_VHD_METADATA_READ_FAILURE NTStatus = 0xC03A0010
+	STATUS_VHD_METADATA_WRITE_FAILURE NTStatus = 0xC03A0011
+	STATUS_VHD_INVALID_SIZE NTStatus = 0xC03A0012
+	STATUS_VHD_INVALID_FILE_SIZE NTStatus = 0xC03A0013
+	STATUS_VIRTDISK_PROVIDER_NOT_FOUND NTStatus = 0xC03A0014
+	STATUS_VIRTDISK_NOT_VIRTUAL_DISK NTStatus = 0xC03A0015
+	STATUS_VHD_PARENT_VHD_ACCESS_DENIED NTStatus = 0xC03A0016
+	STATUS_VHD_CHILD_PARENT_SIZE_MISMATCH NTStatus = 0xC03A0017
+	STATUS_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED NTStatus = 0xC03A0018
+	STATUS_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT NTStatus = 0xC03A0019
+	STATUS_VIRTUAL_DISK_LIMITATION NTStatus = 0xC03A001A
+	STATUS_VHD_INVALID_TYPE NTStatus = 0xC03A001B
+	STATUS_VHD_INVALID_STATE NTStatus = 0xC03A001C
+	STATUS_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE NTStatus = 0xC03A001D
+	STATUS_VIRTDISK_DISK_ALREADY_OWNED NTStatus = 0xC03A001E
+	STATUS_VIRTDISK_DISK_ONLINE_AND_WRITABLE NTStatus = 0xC03A001F
+	STATUS_CTLOG_TRACKING_NOT_INITIALIZED NTStatus = 0xC03A0020
+	STATUS_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE NTStatus = 0xC03A0021
+	STATUS_CTLOG_VHD_CHANGED_OFFLINE NTStatus = 0xC03A0022
+	STATUS_CTLOG_INVALID_TRACKING_STATE NTStatus = 0xC03A0023
+	STATUS_CTLOG_INCONSISTENT_TRACKING_FILE NTStatus = 0xC03A0024
+	STATUS_VHD_METADATA_FULL NTStatus = 0xC03A0028
+	STATUS_VHD_INVALID_CHANGE_TRACKING_ID NTStatus = 0xC03A0029
+	STATUS_VHD_CHANGE_TRACKING_DISABLED NTStatus = 0xC03A002A
+	STATUS_VHD_MISSING_CHANGE_TRACKING_INFORMATION NTStatus = 0xC03A0030
+	STATUS_VHD_RESIZE_WOULD_TRUNCATE_DATA NTStatus = 0xC03A0031
+	STATUS_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE NTStatus = 0xC03A0032
+	STATUS_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE NTStatus = 0xC03A0033
+	STATUS_QUERY_STORAGE_ERROR NTStatus = 0x803A0001
+	STATUS_GDI_HANDLE_LEAK NTStatus = 0x803F0001
+	STATUS_RKF_KEY_NOT_FOUND NTStatus = 0xC0400001
+	STATUS_RKF_DUPLICATE_KEY NTStatus = 0xC0400002
+	STATUS_RKF_BLOB_FULL NTStatus = 0xC0400003
+	STATUS_RKF_STORE_FULL NTStatus = 0xC0400004
+	STATUS_RKF_FILE_BLOCKED NTStatus = 0xC0400005
+	STATUS_RKF_ACTIVE_KEY NTStatus = 0xC0400006
+	STATUS_RDBSS_RESTART_OPERATION NTStatus = 0xC0410001
+	STATUS_RDBSS_CONTINUE_OPERATION NTStatus = 0xC0410002
+	STATUS_RDBSS_POST_OPERATION NTStatus = 0xC0410003
+	STATUS_RDBSS_RETRY_LOOKUP NTStatus = 0xC0410004
+	STATUS_BTH_ATT_INVALID_HANDLE NTStatus = 0xC0420001
+	STATUS_BTH_ATT_READ_NOT_PERMITTED NTStatus = 0xC0420002
+	STATUS_BTH_ATT_WRITE_NOT_PERMITTED NTStatus = 0xC0420003
+	STATUS_BTH_ATT_INVALID_PDU NTStatus = 0xC0420004
+	STATUS_BTH_ATT_INSUFFICIENT_AUTHENTICATION NTStatus = 0xC0420005
+	STATUS_BTH_ATT_REQUEST_NOT_SUPPORTED NTStatus = 0xC0420006
+	STATUS_BTH_ATT_INVALID_OFFSET NTStatus = 0xC0420007
+	STATUS_BTH_ATT_INSUFFICIENT_AUTHORIZATION NTStatus = 0xC0420008
+	STATUS_BTH_ATT_PREPARE_QUEUE_FULL NTStatus = 0xC0420009
+	STATUS_BTH_ATT_ATTRIBUTE_NOT_FOUND NTStatus = 0xC042000A
+	STATUS_BTH_ATT_ATTRIBUTE_NOT_LONG NTStatus = 0xC042000B
+	STATUS_BTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE NTStatus = 0xC042000C
+	STATUS_BTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH NTStatus = 0xC042000D
+	STATUS_BTH_ATT_UNLIKELY NTStatus = 0xC042000E
+	STATUS_BTH_ATT_INSUFFICIENT_ENCRYPTION NTStatus = 0xC042000F
+	STATUS_BTH_ATT_UNSUPPORTED_GROUP_TYPE NTStatus = 0xC0420010
+	STATUS_BTH_ATT_INSUFFICIENT_RESOURCES NTStatus = 0xC0420011
+	STATUS_BTH_ATT_UNKNOWN_ERROR NTStatus = 0xC0421000
+	STATUS_SECUREBOOT_ROLLBACK_DETECTED NTStatus = 0xC0430001
+	STATUS_SECUREBOOT_POLICY_VIOLATION NTStatus = 0xC0430002
+	STATUS_SECUREBOOT_INVALID_POLICY NTStatus = 0xC0430003
+	STATUS_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND NTStatus = 0xC0430004
+	STATUS_SECUREBOOT_POLICY_NOT_SIGNED NTStatus = 0xC0430005
+	STATUS_SECUREBOOT_NOT_ENABLED NTStatus = 0x80430006
+	STATUS_SECUREBOOT_FILE_REPLACED NTStatus = 0xC0430007
+	STATUS_SECUREBOOT_POLICY_NOT_AUTHORIZED NTStatus = 0xC0430008
+	STATUS_SECUREBOOT_POLICY_UNKNOWN NTStatus = 0xC0430009
+	STATUS_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION NTStatus = 0xC043000A
+	STATUS_SECUREBOOT_PLATFORM_ID_MISMATCH NTStatus = 0xC043000B
+	STATUS_SECUREBOOT_POLICY_ROLLBACK_DETECTED NTStatus = 0xC043000C
+	STATUS_SECUREBOOT_POLICY_UPGRADE_MISMATCH NTStatus = 0xC043000D
+	STATUS_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING NTStatus = 0xC043000E
+	STATUS_SECUREBOOT_NOT_BASE_POLICY NTStatus = 0xC043000F
+	STATUS_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY NTStatus = 0xC0430010
+	STATUS_PLATFORM_MANIFEST_NOT_AUTHORIZED NTStatus = 0xC0EB0001
+	STATUS_PLATFORM_MANIFEST_INVALID NTStatus = 0xC0EB0002
+	STATUS_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED NTStatus = 0xC0EB0003
+	STATUS_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED NTStatus = 0xC0EB0004
+	STATUS_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND NTStatus = 0xC0EB0005
+	STATUS_PLATFORM_MANIFEST_NOT_ACTIVE NTStatus = 0xC0EB0006
+	STATUS_PLATFORM_MANIFEST_NOT_SIGNED NTStatus = 0xC0EB0007
+	STATUS_SYSTEM_INTEGRITY_ROLLBACK_DETECTED NTStatus = 0xC0E90001
+	STATUS_SYSTEM_INTEGRITY_POLICY_VIOLATION NTStatus = 0xC0E90002
+	STATUS_SYSTEM_INTEGRITY_INVALID_POLICY NTStatus = 0xC0E90003
+	STATUS_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED NTStatus = 0xC0E90004
+	STATUS_SYSTEM_INTEGRITY_TOO_MANY_POLICIES NTStatus = 0xC0E90005
+	STATUS_SYSTEM_INTEGRITY_SUPPLEMENTAL_POLICY_NOT_AUTHORIZED NTStatus = 0xC0E90006
+	STATUS_NO_APPLICABLE_APP_LICENSES_FOUND NTStatus = 0xC0EA0001
+	STATUS_CLIP_LICENSE_NOT_FOUND NTStatus = 0xC0EA0002
+	STATUS_CLIP_DEVICE_LICENSE_MISSING NTStatus = 0xC0EA0003
+	STATUS_CLIP_LICENSE_INVALID_SIGNATURE NTStatus = 0xC0EA0004
+	STATUS_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID NTStatus = 0xC0EA0005
+	STATUS_CLIP_LICENSE_EXPIRED NTStatus = 0xC0EA0006
+	STATUS_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE NTStatus = 0xC0EA0007
+	STATUS_CLIP_LICENSE_NOT_SIGNED NTStatus = 0xC0EA0008
+	STATUS_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE NTStatus = 0xC0EA0009
+	STATUS_CLIP_LICENSE_DEVICE_ID_MISMATCH NTStatus = 0xC0EA000A
+	STATUS_AUDIO_ENGINE_NODE_NOT_FOUND NTStatus = 0xC0440001
+	STATUS_HDAUDIO_EMPTY_CONNECTION_LIST NTStatus = 0xC0440002
+	STATUS_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED NTStatus = 0xC0440003
+	STATUS_HDAUDIO_NO_LOGICAL_DEVICES_CREATED NTStatus = 0xC0440004
+	STATUS_HDAUDIO_NULL_LINKED_LIST_ENTRY NTStatus = 0xC0440005
+	STATUS_SPACES_REPAIRED NTStatus = 0x00E70000
+	STATUS_SPACES_PAUSE NTStatus = 0x00E70001
+	STATUS_SPACES_COMPLETE NTStatus = 0x00E70002
+	STATUS_SPACES_REDIRECT NTStatus = 0x00E70003
+	STATUS_SPACES_FAULT_DOMAIN_TYPE_INVALID NTStatus = 0xC0E70001
+	STATUS_SPACES_RESILIENCY_TYPE_INVALID NTStatus = 0xC0E70003
+	STATUS_SPACES_DRIVE_SECTOR_SIZE_INVALID NTStatus = 0xC0E70004
+	STATUS_SPACES_DRIVE_REDUNDANCY_INVALID NTStatus = 0xC0E70006
+	STATUS_SPACES_NUMBER_OF_DATA_COPIES_INVALID NTStatus = 0xC0E70007
+	STATUS_SPACES_INTERLEAVE_LENGTH_INVALID NTStatus = 0xC0E70009
+	STATUS_SPACES_NUMBER_OF_COLUMNS_INVALID NTStatus = 0xC0E7000A
+	STATUS_SPACES_NOT_ENOUGH_DRIVES NTStatus = 0xC0E7000B
+	STATUS_SPACES_EXTENDED_ERROR NTStatus = 0xC0E7000C
+	STATUS_SPACES_PROVISIONING_TYPE_INVALID NTStatus = 0xC0E7000D
+	STATUS_SPACES_ALLOCATION_SIZE_INVALID NTStatus = 0xC0E7000E
+	STATUS_SPACES_ENCLOSURE_AWARE_INVALID NTStatus = 0xC0E7000F
+	STATUS_SPACES_WRITE_CACHE_SIZE_INVALID NTStatus = 0xC0E70010
+	STATUS_SPACES_NUMBER_OF_GROUPS_INVALID NTStatus = 0xC0E70011
+	STATUS_SPACES_DRIVE_OPERATIONAL_STATE_INVALID NTStatus = 0xC0E70012
+	STATUS_SPACES_UPDATE_COLUMN_STATE NTStatus = 0xC0E70013
+	STATUS_SPACES_MAP_REQUIRED NTStatus = 0xC0E70014
+	STATUS_SPACES_UNSUPPORTED_VERSION NTStatus = 0xC0E70015
+	STATUS_SPACES_CORRUPT_METADATA NTStatus = 0xC0E70016
+	STATUS_SPACES_DRT_FULL NTStatus = 0xC0E70017
+	STATUS_SPACES_INCONSISTENCY NTStatus = 0xC0E70018
+	STATUS_SPACES_LOG_NOT_READY NTStatus = 0xC0E70019
+	STATUS_SPACES_NO_REDUNDANCY NTStatus = 0xC0E7001A
+	STATUS_SPACES_DRIVE_NOT_READY NTStatus = 0xC0E7001B
+	STATUS_SPACES_DRIVE_SPLIT NTStatus = 0xC0E7001C
+	STATUS_SPACES_DRIVE_LOST_DATA NTStatus = 0xC0E7001D
+	STATUS_SPACES_ENTRY_INCOMPLETE NTStatus = 0xC0E7001E
+	STATUS_SPACES_ENTRY_INVALID NTStatus = 0xC0E7001F
+	STATUS_SPACES_MARK_DIRTY NTStatus = 0xC0E70020
+	STATUS_VOLSNAP_BOOTFILE_NOT_VALID NTStatus = 0xC0500003
+	STATUS_VOLSNAP_ACTIVATION_TIMEOUT NTStatus = 0xC0500004
+	STATUS_IO_PREEMPTED NTStatus = 0xC0510001
+	STATUS_SVHDX_ERROR_STORED NTStatus = 0xC05C0000
+	STATUS_SVHDX_ERROR_NOT_AVAILABLE NTStatus = 0xC05CFF00
+	STATUS_SVHDX_UNIT_ATTENTION_AVAILABLE NTStatus = 0xC05CFF01
+	STATUS_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED NTStatus = 0xC05CFF02
+	STATUS_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED NTStatus = 0xC05CFF03
+	STATUS_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED NTStatus = 0xC05CFF04
+	STATUS_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED NTStatus = 0xC05CFF05
+	STATUS_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED NTStatus = 0xC05CFF06
+	STATUS_SVHDX_RESERVATION_CONFLICT NTStatus = 0xC05CFF07
+	STATUS_SVHDX_WRONG_FILE_TYPE NTStatus = 0xC05CFF08
+	STATUS_SVHDX_VERSION_MISMATCH NTStatus = 0xC05CFF09
+	STATUS_VHD_SHARED NTStatus = 0xC05CFF0A
+	STATUS_SVHDX_NO_INITIATOR NTStatus = 0xC05CFF0B
+	STATUS_VHDSET_BACKING_STORAGE_NOT_FOUND NTStatus = 0xC05CFF0C
+	STATUS_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP NTStatus = 0xC05D0000
+	STATUS_SMB_BAD_CLUSTER_DIALECT NTStatus = 0xC05D0001
+	STATUS_SMB_GUEST_LOGON_BLOCKED NTStatus = 0xC05D0002
+	STATUS_SECCORE_INVALID_COMMAND NTStatus = 0xC0E80000
+	STATUS_VSM_NOT_INITIALIZED NTStatus = 0xC0450000
+	STATUS_VSM_DMA_PROTECTION_NOT_IN_USE NTStatus = 0xC0450001
+	STATUS_APPEXEC_CONDITION_NOT_SATISFIED NTStatus = 0xC0EC0000
+	STATUS_APPEXEC_HANDLE_INVALIDATED NTStatus = 0xC0EC0001
+	STATUS_APPEXEC_INVALID_HOST_GENERATION NTStatus = 0xC0EC0002
+	STATUS_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION NTStatus = 0xC0EC0003
+	STATUS_APPEXEC_INVALID_HOST_STATE NTStatus = 0xC0EC0004
+	STATUS_APPEXEC_NO_DONOR NTStatus = 0xC0EC0005
+	STATUS_APPEXEC_HOST_ID_MISMATCH NTStatus = 0xC0EC0006
+	STATUS_APPEXEC_UNKNOWN_USER NTStatus = 0xC0EC0007
+)
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index aa768631b..559bc845c 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -172,6 +172,7 @@ var (
 	procCancelIo = modkernel32.NewProc("CancelIo")
 	procCancelIoEx = modkernel32.NewProc("CancelIoEx")
 	procCloseHandle = modkernel32.NewProc("CloseHandle")
+	procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
 	procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW")
 	procCreateEventExW = modkernel32.NewProc("CreateEventExW")
 	procCreateEventW = modkernel32.NewProc("CreateEventW")
@@ -182,6 +183,7 @@ var (
 	procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW")
 	procCreateMutexExW = modkernel32.NewProc("CreateMutexExW")
 	procCreateMutexW = modkernel32.NewProc("CreateMutexW")
+	procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
 	procCreatePipe = modkernel32.NewProc("CreatePipe")
 	procCreateProcessW = modkernel32.NewProc("CreateProcessW")
 	procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW")
@@ -203,6 +205,7 @@ var (
 	procFindNextFileW = modkernel32.NewProc("FindNextFileW")
 	procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW")
 	procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW")
+	procFindResourceW = modkernel32.NewProc("FindResourceW")
 	procFindVolumeClose = modkernel32.NewProc("FindVolumeClose")
 	procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose")
 	procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers")
@@ -239,6 +242,8 @@ var (
 	procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW")
 	procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW")
 	procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW")
+	procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
+	procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
 	procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult")
 	procGetPriorityClass = modkernel32.NewProc("GetPriorityClass")
 	procGetProcAddress = modkernel32.NewProc("GetProcAddress")
@@ -273,8 +278,11 @@ var (
 	procIsWow64Process2 = modkernel32.NewProc("IsWow64Process2")
 	procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW")
 	procLoadLibraryW = modkernel32.NewProc("LoadLibraryW")
+	procLoadResource = modkernel32.NewProc("LoadResource")
+	procLocalAlloc = modkernel32.NewProc("LocalAlloc")
 	procLocalFree = modkernel32.NewProc("LocalFree")
 	procLockFileEx = modkernel32.NewProc("LockFileEx")
+	procLockResource = modkernel32.NewProc("LockResource")
 	procMapViewOfFile = modkernel32.NewProc("MapViewOfFile")
 	procMoveFileExW = modkernel32.NewProc("MoveFileExW")
 	procMoveFileW = modkernel32.NewProc("MoveFileW")
@@ -289,6 +297,7 @@ var (
 	procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId")
 	procPulseEvent = modkernel32.NewProc("PulseEvent")
 	procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW")
+	procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW")
 	procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
 	procReadConsoleW = modkernel32.NewProc("ReadConsoleW")
 	procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW")
@@ -314,6 +323,7 @@ var (
 	procSetFileTime = modkernel32.NewProc("SetFileTime")
 	procSetHandleInformation = modkernel32.NewProc("SetHandleInformation")
 	procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject")
+	procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState")
 	procSetPriorityClass = modkernel32.NewProc("SetPriorityClass")
 	procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost")
 	procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters")
@@ -321,6 +331,7 @@ var (
 	procSetStdHandle = modkernel32.NewProc("SetStdHandle")
 	procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW")
 	procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW")
+	procSizeofResource = modkernel32.NewProc("SizeofResource")
 	procSleepEx = modkernel32.NewProc("SleepEx")
 	procTerminateJobObject = modkernel32.NewProc("TerminateJobObject")
 	procTerminateProcess = modkernel32.NewProc("TerminateProcess")
@@ -344,11 +355,25 @@ var (
 	procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree")
 	procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation")
 	procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo")
+	procNtCreateFile = modntdll.NewProc("NtCreateFile")
+	procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
+	procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess")
+	procNtSetInformationProcess = modntdll.NewProc("NtSetInformationProcess")
+	procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
+	procRtlDosPathNameToNtPathName_U_WithStatus = modntdll.NewProc("RtlDosPathNameToNtPathName_U_WithStatus")
+	procRtlDosPathNameToRelativeNtPathName_U_WithStatus = modntdll.NewProc("RtlDosPathNameToRelativeNtPathName_U_WithStatus")
+	procRtlGetCurrentPeb = modntdll.NewProc("RtlGetCurrentPeb")
 	procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers")
 	procRtlGetVersion = modntdll.NewProc("RtlGetVersion")
+	procRtlInitString = modntdll.NewProc("RtlInitString")
+	procRtlInitUnicodeString = modntdll.NewProc("RtlInitUnicodeString")
+	procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
 	procCLSIDFromString = modole32.NewProc("CLSIDFromString")
 	procCoCreateGuid = modole32.NewProc("CoCreateGuid")
+	procCoGetObject = modole32.NewProc("CoGetObject")
+	procCoInitializeEx = modole32.NewProc("CoInitializeEx")
 	procCoTaskMemFree = modole32.NewProc("CoTaskMemFree")
+	procCoUninitialize = modole32.NewProc("CoUninitialize")
 	procStringFromGUID2 = modole32.NewProc("StringFromGUID2")
 	procEnumProcesses = modpsapi.NewProc("EnumProcesses")
 	procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications")
@@ -370,6 +395,7 @@ var (
 	procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW")
 	procWSACleanup = modws2_32.NewProc("WSACleanup")
 	procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW")
+	procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
 	procWSAIoctl = modws2_32.NewProc("WSAIoctl")
 	procWSARecv = modws2_32.NewProc("WSARecv")
 	procWSARecvFrom = modws2_32.NewProc("WSARecvFrom")
@@ -1434,6 +1460,14 @@ func CloseHandle(handle Handle) (err error) {
 	return
 }
 
+func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
 	r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0)
 	if r1 == 0 {
@@ -1445,7 +1479,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
 func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
 	r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
 	handle = Handle(r0)
-	if handle == 0 {
+	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
 		err = errnoErr(e1)
 	}
 	return
@@ -1454,7 +1488,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d
 func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) {
 	r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0)
 	handle = Handle(r0)
-	if handle == 0 {
+	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
 		err = errnoErr(e1)
 	}
 	return
@@ -1463,7 +1497,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat
 func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) {
 	r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
 	handle = Handle(r0)
-	if handle == 0 {
+	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
 		err = errnoErr(e1)
 	}
 	return
@@ -1507,7 +1541,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle,
 func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
 	r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
 	handle = Handle(r0)
-	if handle == 0 {
+	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
 		err = errnoErr(e1)
 	}
 	return
@@ -1520,7 +1554,16 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16
 	}
 	r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
 	handle = Handle(r0)
-	if handle == 0 {
+	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
+	handle = Handle(r0)
+	if handle == InvalidHandle {
 		err = errnoErr(e1)
 	}
 	return
@@ -1714,6 +1757,15 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32)
 	return
 }
 
+func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType))
+	resInfo = Handle(r0)
+	if resInfo == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func FindVolumeClose(findVolume Handle) (err error) {
 	r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0)
 	if r1 == 0 {
@@ -2008,6 +2060,22 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er
 	return
 }
 
+func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
+	r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) {
 	var _p0 uint32
 	if wait {
@@ -2322,6 +2390,24 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) {
 	return
 }
 
+func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+	resData = Handle(r0)
+	if resData == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
+	r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0)
+	ptr = uintptr(r0)
+	if ptr == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func LocalFree(hmem Handle) (handle Handle, err error) {
 	r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0)
 	handle = Handle(r0)
@@ -2339,6 +2425,15 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt
 	return
 }
 
+func LockResource(resData Handle) (addr uintptr, err error) {
+	r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0)
+	addr = uintptr(r0)
+	if addr == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) {
 	r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0)
 	addr = uintptr(r0)
@@ -2474,6 +2569,14 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3
 	return
 }
 
+func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) {
 	r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0)
 	if r1 == 0 {
@@ -2692,6 +2795,14 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb
 	return
 }
 
+func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func SetPriorityClass(process Handle, priorityClass uint32) (err error) {
 	r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0)
 	if r1 == 0 {
@@ -2752,6 +2863,15 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro
 	return
 }
 
+func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+	size = uint32(r0)
+	if size == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func SleepEx(milliseconds uint32, alertable bool) (ret uint32) {
 	var _p0 uint32
 	if alertable {
@@ -2810,7 +2930,7 @@ func UnmapViewOfFile(addr uintptr) (err error) {
 	return
 }
 
-func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value uintptr, size uintptr, prevvalue uintptr, returnedsize *uintptr) (err error) {
+func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) {
 	r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0)
 	if r1 == 0 {
 		err = errnoErr(e1)
@@ -2946,19 +3066,97 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by
 	return
 }
 
+func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) {
+	r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0)
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) {
+	r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) {
+	r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0)
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) {
+	r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0)
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
+	r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0)
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
+	r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
+	r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func RtlGetCurrentPeb() (peb *PEB) {
+	r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0)
+	peb = (*PEB)(unsafe.Pointer(r0))
+	return
+}
+
 func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) {
 	syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
 	return
 }
 
-func rtlGetVersion(info *OsVersionInfoEx) (ret error) {
+func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
 	r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0)
 	if r0 != 0 {
-		ret = syscall.Errno(r0)
+		ntstatus = NTStatus(r0)
 	}
 	return
 }
 
+func RtlInitString(destinationString *NTString, sourceString *byte) {
+	syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+	return
+}
+
+func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) {
+	syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+	return
+}
+
+func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) {
+	r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0)
+	ret = syscall.Errno(r0)
+	return
+}
+
 func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
 	r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0)
 	if r0 != 0 {
@@ -2975,11 +3173,32 @@ func coCreateGuid(pguid *GUID) (ret error) {
 	return
 }
 
+func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) {
+	r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0)
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
+	r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0)
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
 func CoTaskMemFree(address unsafe.Pointer) {
 	syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0)
 	return
 }
 
+func CoUninitialize() {
+	syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0)
+	return
+}
+
 func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) {
 	r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
 	chars = int32(r0)
@@ -3158,6 +3377,18 @@ func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferL
 	return
 }
 
+func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
+	var _p0 uint32
+	if wait {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) {
 	r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
 	if r1 == socket_error {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f1e50bf9d..fec7c5692 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -27,7 +27,7 @@ github.com/VictoriaMetrics/metricsql
 github.com/VictoriaMetrics/metricsql/binaryop
 # github.com/VividCortex/ewma v1.1.1
 github.com/VividCortex/ewma
-# github.com/aws/aws-sdk-go v1.37.22
+# github.com/aws/aws-sdk-go v1.37.26
 ## explicit
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
@@ -126,7 +126,7 @@ github.com/jmespath/go-jmespath
 github.com/jstemmer/go-junit-report
 github.com/jstemmer/go-junit-report/formatter
 github.com/jstemmer/go-junit-report/parser
-# github.com/klauspost/compress v1.11.9
+# github.com/klauspost/compress v1.11.12
 ## explicit
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
@@ -259,7 +259,7 @@ golang.org/x/oauth2/jwt
 # golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 ## explicit
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.0.0-20210301091718-77cc2087c03b
+# golang.org/x/sys v0.0.0-20210309074719-68d13333faf2
 ## explicit
 golang.org/x/sys/execabs
 golang.org/x/sys/internal/unsafeheader
@@ -313,7 +313,7 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20210302174412-5ede27ff9881
+# google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb
 ## explicit
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/iam/v1

From 433fff000642cbc7f0be81623d6a368bacd4182c Mon Sep 17 00:00:00 2001
From: Nikolay
Date: Tue, 9 Mar 2021 13:26:23 +0300
Subject: [PATCH 02/59] duplicate timeseries fix for prometheus_buckets function (#1119)

* try fix for prometheus_buckets

* merge possible end of the
bucket collision --- app/vmselect/promql/exec_test.go | 204 +++++++++++++++++++++++++++++++ app/vmselect/promql/transform.go | 26 +++- 2 files changed, 224 insertions(+), 6 deletions(-) diff --git a/app/vmselect/promql/exec_test.go b/app/vmselect/promql/exec_test.go index c50c64eea..62f46632f 100644 --- a/app/vmselect/promql/exec_test.go +++ b/app/vmselect/promql/exec_test.go @@ -3653,6 +3653,210 @@ func TestExecSuccess(t *testing.T) { resultExpected := []netstorage.Result{r1, r2, r3, r4} f(q, resultExpected) }) + t.Run(`prometheus_buckets(overlapped ranges)`, func(t *testing.T) { + t.Parallel() + q := `sort(prometheus_buckets(( + alias(label_set(90, "foo", "bar", "vmrange", "0...0"), "xxx"), + alias(label_set(time()/20, "foo", "bar", "vmrange", "0...0.2"), "xxx"), + alias(label_set(time()/20, "foo", "bar", "vmrange", "0.2...0.25"), "xxx"), + alias(label_set(time()/20, "foo", "bar", "vmrange", "0...0.26"), "xxx"), + alias(label_set(time()/100, "foo", "bar", "vmrange", "0.2...40"), "xxx"), + alias(label_set(time()/10, "foo", "bar", "vmrange", "40...Inf"), "xxx"), + )))` + r1 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{90, 90, 90, 90, 90, 90}, + Timestamps: timestampsExpected, + } + r1.MetricName.MetricGroup = []byte("xxx") + r1.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("0"), + }, + } + r2 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{140, 150, 160, 170, 180, 190}, + Timestamps: timestampsExpected, + } + r2.MetricName.MetricGroup = []byte("xxx") + r2.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("0.2"), + }, + } + r3 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{190, 210, 230, 250, 270, 290}, + Timestamps: timestampsExpected, + } + r3.MetricName.MetricGroup = []byte("xxx") + r3.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("0.25"), + }, + } + r4 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{240, 270, 300, 330, 360, 390}, + Timestamps: timestampsExpected, + } + r4.MetricName.MetricGroup = []byte("xxx") + r4.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("0.26"), + }, + } + r5 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{250, 282, 314, 346, 378, 410}, + Timestamps: timestampsExpected, + } + r5.MetricName.MetricGroup = []byte("xxx") + r5.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("40"), + }, + } + r6 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{350, 402, 454, 506, 558, 610}, + Timestamps: timestampsExpected, + } + r6.MetricName.MetricGroup = []byte("xxx") + r6.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("Inf"), + }, + } + + resultExpected := []netstorage.Result{r1, r2, r3, r4, r5, r6} + f(q, resultExpected) + }) + t.Run(`prometheus_buckets(overlapped ranges at the end)`, func(t *testing.T) { + t.Parallel() + q := `sort(prometheus_buckets(( + alias(label_set(90, "foo", "bar", "vmrange", "0...0"), "xxx"), + alias(label_set(time()/20, "foo", "bar", "vmrange", "0...0.2"), "xxx"), + alias(label_set(time()/20, "foo", "bar", 
"vmrange", "0.2...0.25"), "xxx"), + alias(label_set(time()/20, "foo", "bar", "vmrange", "0...0.25"), "xxx"), + alias(label_set(time()/100, "foo", "bar", "vmrange", "0.2...40"), "xxx"), + alias(label_set(time()/10, "foo", "bar", "vmrange", "40...Inf"), "xxx"), + )))` + r1 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{90, 90, 90, 90, 90, 90}, + Timestamps: timestampsExpected, + } + r1.MetricName.MetricGroup = []byte("xxx") + r1.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("0"), + }, + } + r2 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{140, 150, 160, 170, 180, 190}, + Timestamps: timestampsExpected, + } + r2.MetricName.MetricGroup = []byte("xxx") + r2.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("0.2"), + }, + } + r3 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{190, 210, 230, 250, 270, 290}, + Timestamps: timestampsExpected, + } + r3.MetricName.MetricGroup = []byte("xxx") + r3.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("0.25"), + }, + } + r4 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{200, 222, 244, 266, 288, 310}, + Timestamps: timestampsExpected, + } + r4.MetricName.MetricGroup = []byte("xxx") + r4.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("40"), + }, + } + r5 := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{300, 342, 384, 426, 468, 510}, + Timestamps: timestampsExpected, + } + r5.MetricName.MetricGroup = []byte("xxx") + r5.MetricName.Tags = []storage.Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("le"), + Value: []byte("Inf"), + }, + } + + resultExpected := []netstorage.Result{r1, r2, r3, r4, r5} + f(q, resultExpected) + }) t.Run(`median_over_time()`, func(t *testing.T) { t.Parallel() q := `median_over_time({})` diff --git a/app/vmselect/promql/transform.go b/app/vmselect/promql/transform.go index a0e0d23c0..ded21f2a9 100644 --- a/app/vmselect/promql/transform.go +++ b/app/vmselect/promql/transform.go @@ -518,6 +518,7 @@ func vmrangeBucketsToLE(tss []*timeseries) []*timeseries { sort.Slice(xss, func(i, j int) bool { return xss[i].end < xss[j].end }) xssNew := make([]x, 0, len(xss)+2) var xsPrev x + uniqTs := make(map[string]*timeseries, len(xss)) for _, xs := range xss { ts := xs.ts if isZeroTS(ts) { @@ -526,14 +527,27 @@ func vmrangeBucketsToLE(tss []*timeseries) []*timeseries { continue } if xs.start != xsPrev.end { - xssNew = append(xssNew, x{ - endStr: xs.startStr, - end: xs.start, - ts: copyTS(ts, xs.startStr), - }) + // check for duplicates at the start of bucket. + // in case of duplicate following le already exists. + // no need to add new one with zero values. + if _, ok := uniqTs[xs.startStr]; !ok { + uniqTs[xs.startStr] = xs.ts + xssNew = append(xssNew, x{ + endStr: xs.startStr, + end: xs.start, + ts: copyTS(ts, xs.startStr), + }) + } } ts.MetricName.AddTag("le", xs.endStr) - xssNew = append(xssNew, xs) + if prevTs, ok := uniqTs[xs.endStr]; !ok { + xssNew = append(xssNew, xs) + uniqTs[xs.endStr] = xs.ts + } else { + // end of current bucket not uniq, + // need to merge it with existing bucket. 
+			mergeNonOverlappingTimeseries(prevTs, xs.ts)
+		}
 		xsPrev = xs
 	}
 	if !math.IsInf(xsPrev.end, 1) {

From fe8b12fbadc900d8c6a6395c3b70d1a8fe881b44 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Tue, 9 Mar 2021 12:46:11 +0200
Subject: [PATCH 03/59] app/vmselect/promql: follow up for 433fff000642cbc7f0be81623d6a368bacd4182c

---
 app/vmselect/promql/transform.go | 29 ++++++++++++-----------------
 docs/CHANGELOG.md                |  1 +
 2 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/app/vmselect/promql/transform.go b/app/vmselect/promql/transform.go
index ded21f2a9..495a174db 100644
--- a/app/vmselect/promql/transform.go
+++ b/app/vmselect/promql/transform.go
@@ -526,27 +526,22 @@ func vmrangeBucketsToLE(tss []*timeseries) []*timeseries {
 			xsPrev = xs
 			continue
 		}
-		if xs.start != xsPrev.end {
-			// check for duplicates at the start of bucket.
-			// in case of duplicate following le already exists.
-			// no need to add new one with zero values.
-			if _, ok := uniqTs[xs.startStr]; !ok {
-				uniqTs[xs.startStr] = xs.ts
-				xssNew = append(xssNew, x{
-					endStr: xs.startStr,
-					end:    xs.start,
-					ts:     copyTS(ts, xs.startStr),
-				})
-			}
+		if xs.start != xsPrev.end && uniqTs[xs.startStr] == nil {
+			uniqTs[xs.startStr] = xs.ts
+			xssNew = append(xssNew, x{
+				endStr: xs.startStr,
+				end:    xs.start,
+				ts:     copyTS(ts, xs.startStr),
+			})
 		}
 		ts.MetricName.AddTag("le", xs.endStr)
-		if prevTs, ok := uniqTs[xs.endStr]; !ok {
+		prevTs := uniqTs[xs.endStr]
+		if prevTs != nil {
+			// the end of the current bucket is not unique, need to merge it with the existing bucket.
+			mergeNonOverlappingTimeseries(prevTs, xs.ts)
+		} else {
 			xssNew = append(xssNew, xs)
 			uniqTs[xs.endStr] = xs.ts
-		} else {
-			// end of current bucket not uniq,
-			// need to merge it with existing bucket.
-			mergeNonOverlappingTimeseries(prevTs, xs.ts)
 		}
 		xsPrev = xs
 	}
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index d13dfbe76..fdfbdcd81 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -12,6 +12,7 @@
 * BUGFIX: vmagent: prevent from high CPU usage bug during failing scrapes with small `scrape_timeout` (less than a few seconds).
 * BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in big number of distinct jobs by sharing the cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
+* BUGFIX: avoid `duplicate time series` error if `prometheus_buckets()` covers a time range with a distinct set of buckets.
 * BUGFIX: prevent exponent overflow when processing extremely small values close to zero such as `2.964393875E-314`. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1114
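For reference, the dedup-and-merge idea behind the two patches above can be sketched in a few lines of standalone Go. This is a simplified illustration, not the actual VictoriaMetrics code: the `bucket` type stands in for `timeseries`, and the summing merge stands in for `mergeNonOverlappingTimeseries`.

```go
package main

import "fmt"

type bucket struct {
	le     string    // upper bound label value, e.g. "0.25"
	values []float64 // one value per timestamp (assumed aligned)
}

// dedupBuckets keeps the first series seen for every `le` bound and merges
// later series with the same bound into it, so no two results share labels.
func dedupBuckets(bs []bucket) []bucket {
	uniq := make(map[string]int, len(bs)) // le -> index into result
	result := make([]bucket, 0, len(bs))
	for _, b := range bs {
		if i, ok := uniq[b.le]; ok {
			// Duplicate bound: merge instead of appending. The real code
			// calls mergeNonOverlappingTimeseries here; summing is just a
			// placeholder merge rule for this sketch.
			for j := range result[i].values {
				result[i].values[j] += b.values[j]
			}
			continue
		}
		uniq[b.le] = len(result)
		result = append(result, b)
	}
	return result
}

func main() {
	bs := []bucket{
		{le: "0.25", values: []float64{1, 2}},
		{le: "0.25", values: []float64{3, 4}}, // would trigger `duplicate time series` before the fix
		{le: "+Inf", values: []float64{5, 6}},
	}
	fmt.Println(dedupBuckets(bs)) // [{0.25 [4 6]} {+Inf [5 6]}]
}
```

Storing indices rather than pointers into `result` keeps the lookup map valid while the slice grows and possibly reallocates.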
From e44532b760d487a8cc55ae86b9fb1c8d90b593c8 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Tue, 9 Mar 2021 13:12:01 +0200
Subject: [PATCH 04/59] docs/CHANGELOG.md: mention about improved query performance at https://github.com/VictoriaMetrics/VictoriaMetrics/commit/18fe0ff14bc78860c5569e2b70de1db78fac61be

---
 docs/CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index fdfbdcd81..7a4fb6f23 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -6,10 +6,12 @@
   - `histogram_avg(buckets)` - returns the average value for the given buckets.
   - `histogram_stdvar(buckets)` - returns standard variance for the given buckets.
   - `histogram_stddev(buckets)` - returns standard deviation for the given buckets.
+* FEATURE: reduce median query duration by up to 2x. See https://github.com/VictoriaMetrics/VictoriaMetrics/commit/18fe0ff14bc78860c5569e2b70de1db78fac61be
 * FEATURE: vmagent: add ability to replicate scrape targets among `vmagent` instances in the cluster with `-promscrape.cluster.replicationFactor` command-line flag. See [these docs](https://victoriametrics.github.io/vmagent.html#scraping-big-number-of-targets).
 * FEATURE: vmagent: accept `scrape_offset` option at `scrape_config`. This option may be useful when scrapes must start at the specified offset of every scrape interval. See [these docs](https://victoriametrics.github.io/vmagent.html#troubleshooting) for details.
 * FEATURE: vmauth: allow using regexp paths in `url_map`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1112) for details.
+
 * BUGFIX: vmagent: prevent from high CPU usage bug during failing scrapes with small `scrape_timeout` (less than a few seconds).
 * BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in big number of distinct jobs by sharing the cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
 * BUGFIX: avoid `duplicate time series` error if `prometheus_buckets()` covers a time range with a distinct set of buckets.

From 4b18a4f026b301344e7fafb3dfec745b54fee23e Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Tue, 9 Mar 2021 15:04:30 +0200
Subject: [PATCH 05/59] lib/promscrape/discovery/kubernetes: remove too verbose logs about starting and stopping the watchers

Log the number of objects loaded per each watch url

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
---
 lib/promscrape/discovery/kubernetes/api_watcher.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/lib/promscrape/discovery/kubernetes/api_watcher.go b/lib/promscrape/discovery/kubernetes/api_watcher.go
index 10066dea6..b997f2832 100644
--- a/lib/promscrape/discovery/kubernetes/api_watcher.go
+++ b/lib/promscrape/discovery/kubernetes/api_watcher.go
@@ -159,9 +159,7 @@ func (aw *apiWatcher) startWatcherForURL(role, apiURL string, parseObject parseO
 	aw.wg.Add(1)
 	go func() {
 		defer aw.wg.Done()
-		logger.Infof("started watcher for %q", apiURL)
 		uw.watchForUpdates()
-		logger.Infof("stopped watcher for %q", apiURL)
 		uw.objectsByKey.decRef()

 		aw.mu.Lock()
@@ -291,6 +289,7 @@ func (uw *urlWatcher) reloadObjects() string {
 		}
 		return ""
 	}
+	logger.Infof("loaded %d objects from %q", len(objectsByKey), requestURL)
 	uw.objectsByKey.reload(objectsByKey)
 	swosByKey := make(map[string][]interface{})
 	for k, o := range objectsByKey {

From 3fd8653b40113de40b6dc265724104fcf2f776d9 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Tue, 9 Mar 2021 15:47:15 +0200
Subject: [PATCH 06/59] lib/promscrape: apply `sample_limit` after metric relabeling is applied as Prometheus does

See the description for `sample_limit` option from Prometheus docs:

  Per-scrape limit on number of scraped samples that will be accepted.
  If more than this number of samples are present after metric relabeling
  the entire scrape will be treated as failed. 0 means no limit.

https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
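The diff that follows swaps the order of these two steps. A hedged sketch of the resulting semantics, with illustrative names rather than vmagent's real API:

```go
package main

import "fmt"

type sample struct {
	name    string
	dropped bool // would be decided by metric_relabel_configs
}

// applyRelabeling stands in for metric relabeling; it simply removes
// samples that the (hypothetical) relabel rules dropped.
func applyRelabeling(in []sample) []sample {
	out := make([]sample, 0, len(in))
	for _, s := range in {
		if !s.dropped {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	scraped := []sample{{"a", false}, {"b", true}, {"c", true}, {"d", false}}
	const sampleLimit = 3

	// Before this patch the limit was compared against the raw scraped
	// count (4 > 3), so this scrape would be treated as failed even
	// though relabeling drops half of the samples.
	kept := applyRelabeling(scraped)

	// New order, matching Prometheus: the limit sees the post-relabeling
	// count (2), so the scrape succeeds.
	if sampleLimit > 0 && len(kept) > sampleLimit {
		fmt.Println("scrape failed: sample_limit exceeded")
		return
	}
	fmt.Printf("accepted %d of %d scraped samples\n", len(kept), len(scraped))
}
```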
---
 docs/CHANGELOG.md                 |  4 ++--
 lib/promscrape/scrapework.go      | 24 ++++++------------------
 lib/promscrape/scrapework_test.go |  2 +-
 3 files changed, 9 insertions(+), 21 deletions(-)

diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 7a4fb6f23..958d0e344 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -11,9 +11,9 @@
 * FEATURE: vmagent: accept `scrape_offset` option at `scrape_config`. This option may be useful when scrapes must start at the specified offset of every scrape interval. See [these docs](https://victoriametrics.github.io/vmagent.html#troubleshooting) for details.
 * FEATURE: vmauth: allow using regexp paths in `url_map`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1112) for details.
-
 * BUGFIX: vmagent: prevent from high CPU usage bug during failing scrapes with small `scrape_timeout` (less than a few seconds).
-* BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in big number of distinct jobs by sharing the cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
+* BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in big number of distinct scrape config jobs by sharing Kubernetes object cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
+* BUGFIX: vmagent: apply `sample_limit` only after `metric_relabel_configs` are applied as Prometheus does. Previously the `sample_limit` was applied before metrics relabeling.
 * BUGFIX: avoid `duplicate time series` error if `prometheus_buckets()` covers a time range with a distinct set of buckets.
 * BUGFIX: prevent exponent overflow when processing extremely small values close to zero such as `2.964393875E-314`. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1114
diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index e5e28ff42..003b0348a 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -290,27 +290,15 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	srcRows := wc.rows.Rows
 	samplesScraped := len(srcRows)
 	scrapedSamples.Update(float64(samplesScraped))
-	if sw.Config.SampleLimit > 0 && samplesScraped > sw.Config.SampleLimit {
-		srcRows = srcRows[:0]
+	for i := range srcRows {
+		sw.addRowToTimeseries(wc, &srcRows[i], scrapeTimestamp, true)
+	}
+	samplesPostRelabeling := len(wc.writeRequest.Timeseries)
+	if sw.Config.SampleLimit > 0 && samplesPostRelabeling > sw.Config.SampleLimit {
+		wc.resetNoRows()
 		up = 0
 		scrapesSkippedBySampleLimit.Inc()
 	}
-	samplesPostRelabeling := 0
-	for i := range srcRows {
-		sw.addRowToTimeseries(wc, &srcRows[i], scrapeTimestamp, true)
-		if len(wc.labels) > 40000 {
-			// Limit the maximum size of wc.writeRequest.
-			// This should reduce memory usage when scraping targets with millions of metrics and/or labels.
- // For example, when scraping /federate handler from Prometheus - see https://prometheus.io/docs/prometheus/latest/federation/ - samplesPostRelabeling += len(wc.writeRequest.Timeseries) - sw.updateSeriesAdded(wc) - startTime := time.Now() - sw.PushData(&wc.writeRequest) - pushDataDuration.UpdateDuration(startTime) - wc.resetNoRows() - } - } - samplesPostRelabeling += len(wc.writeRequest.Timeseries) sw.updateSeriesAdded(wc) seriesAdded := sw.finalizeSeriesAdded(samplesPostRelabeling) sw.addAutoTimeseries(wc, "up", float64(up), scrapeTimestamp) diff --git a/lib/promscrape/scrapework_test.go b/lib/promscrape/scrapework_test.go index 74b6f67e8..79e6cb6ce 100644 --- a/lib/promscrape/scrapework_test.go +++ b/lib/promscrape/scrapework_test.go @@ -332,7 +332,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) { up 0 123 scrape_samples_scraped 2 123 scrape_duration_seconds 0 123 - scrape_samples_post_metric_relabeling 0 123 + scrape_samples_post_metric_relabeling 2 123 scrape_series_added 0 123 `) } From ad34f42467bc6c8c57aae3516ee60322f5d3b211 Mon Sep 17 00:00:00 2001 From: Nikolay Date: Tue, 9 Mar 2021 19:51:00 +0300 Subject: [PATCH 07/59] Changes tlsConfig init for proxy connections (#1121) https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1116 --- lib/promscrape/client.go | 2 +- lib/promscrape/discoveryutils/client.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/promscrape/client.go b/lib/promscrape/client.go index 05a12f12f..ce8744886 100644 --- a/lib/promscrape/client.go +++ b/lib/promscrape/client.go @@ -57,7 +57,7 @@ func newClient(sw *ScrapeWork) *client { requestURI := string(u.RequestURI()) isTLS := string(u.Scheme()) == "https" var tlsCfg *tls.Config - if isTLS { + if sw.AuthConfig != nil { tlsCfg = sw.AuthConfig.NewTLSConfig() } if !strings.Contains(host, ":") { diff --git a/lib/promscrape/discoveryutils/client.go b/lib/promscrape/discoveryutils/client.go index 65b514d63..58fe34bc9 100644 --- a/lib/promscrape/discoveryutils/client.go +++ b/lib/promscrape/discoveryutils/client.go @@ -66,7 +66,7 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL proxy.URL) (*Clie hostPort := string(u.Host()) isTLS := string(u.Scheme()) == "https" - if isTLS && ac != nil { + if ac != nil { tlsCfg = ac.NewTLSConfig() } if !strings.Contains(hostPort, ":") { From 36fd00724712b86990c838aad2cae6c042288250 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Tue, 9 Mar 2021 18:54:09 +0200 Subject: [PATCH 08/59] lib/proxy: set missing ServerName in TLS config for `proxy_url`. While at it, allow setting Proxy-Authorization for `proxy_url` via `basic_auth` and `bearer_token` configs. --- docs/CHANGELOG.md | 1 + lib/promauth/config.go | 5 ++- lib/promscrape/client.go | 4 +-- lib/promscrape/discoveryutils/client.go | 4 +-- lib/promscrape/statconn.go | 6 ++-- lib/proxy/proxy.go | 47 +++++++++++++++++++++---- 6 files changed, 53 insertions(+), 14 deletions(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 958d0e344..d15ff460d 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -14,6 +14,7 @@ * BUGFIX: vmagent: prevent from high CPU usage bug during failing scrapes with small `scrape_timeout` (less than a few seconds). * BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in big number of distinct scrape config jobs by sharing Kubernetes object cache. 
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
 * BUGFIX: vmagent: apply `sample_limit` only after `metric_relabel_configs` are applied as Prometheus does. Previously the `sample_limit` was applied before metrics relabeling.
+* BUGFIX: vmagent: properly apply `tls_config`, `basic_auth` and `bearer_token` to proxy connections if `proxy_url` option is set. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1116
 * BUGFIX: avoid `duplicate time series` error if `prometheus_buckets()` covers a time range with a distinct set of buckets.
 * BUGFIX: prevent exponent overflow when processing extremely small values close to zero such as `2.964393875E-314`. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1114
diff --git a/lib/promauth/config.go b/lib/promauth/config.go
index 184f872bf..f80b15ae2 100644
--- a/lib/promauth/config.go
+++ b/lib/promauth/config.go
@@ -65,13 +65,16 @@ func (ac *Config) tlsCertificateString() string {
 // NewTLSConfig returns new TLS config for the given ac.
 func (ac *Config) NewTLSConfig() *tls.Config {
 	tlsCfg := &tls.Config{
-		RootCAs:            ac.TLSRootCA,
 		ClientSessionCache: tls.NewLRUClientSessionCache(0),
 	}
+	if ac == nil {
+		return tlsCfg
+	}
 	if ac.TLSCertificate != nil {
 		// Do not set tlsCfg.GetClientCertificate, since tlsCfg.Certificates should work OK.
 		tlsCfg.Certificates = []tls.Certificate{*ac.TLSCertificate}
 	}
+	tlsCfg.RootCAs = ac.TLSRootCA
 	tlsCfg.ServerName = ac.TLSServerName
 	tlsCfg.InsecureSkipVerify = ac.TLSInsecureSkipVerify
 	return tlsCfg
diff --git a/lib/promscrape/client.go b/lib/promscrape/client.go
index ce8744886..958c06055 100644
--- a/lib/promscrape/client.go
+++ b/lib/promscrape/client.go
@@ -57,7 +57,7 @@ func newClient(sw *ScrapeWork) *client {
 	requestURI := string(u.RequestURI())
 	isTLS := string(u.Scheme()) == "https"
 	var tlsCfg *tls.Config
-	if sw.AuthConfig != nil {
+	if isTLS {
 		tlsCfg = sw.AuthConfig.NewTLSConfig()
 	}
 	if !strings.Contains(host, ":") {
@@ -67,7 +67,7 @@ func newClient(sw *ScrapeWork) *client {
 			host += ":443"
 		}
 	}
-	dialFunc, err := newStatDialFunc(sw.ProxyURL, tlsCfg)
+	dialFunc, err := newStatDialFunc(sw.ProxyURL, sw.AuthConfig)
 	if err != nil {
 		logger.Fatalf("cannot create dial func: %s", err)
 	}
diff --git a/lib/promscrape/discoveryutils/client.go b/lib/promscrape/discoveryutils/client.go
index 58fe34bc9..ecddf92cc 100644
--- a/lib/promscrape/discoveryutils/client.go
+++ b/lib/promscrape/discoveryutils/client.go
@@ -66,7 +66,7 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL proxy.URL) (*Clie
 	hostPort := string(u.Host())
 	isTLS := string(u.Scheme()) == "https"
-	if ac != nil {
+	if isTLS {
 		tlsCfg = ac.NewTLSConfig()
 	}
 	if !strings.Contains(hostPort, ":") {
@@ -77,7 +77,7 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL proxy.URL) (*Clie
 		hostPort = net.JoinHostPort(hostPort, port)
 	}
 	if dialFunc == nil {
-		dialFunc, err = proxyURL.NewDialFunc(tlsCfg)
+		dialFunc, err = proxyURL.NewDialFunc(ac)
 		if err != nil {
 			return nil, err
 		}
diff --git a/lib/promscrape/statconn.go b/lib/promscrape/statconn.go
index 2fefaf420..d8bbdce9f 100644
--- a/lib/promscrape/statconn.go
+++ b/lib/promscrape/statconn.go
@@ -2,7 +2,6 @@ package promscrape

 import (
 	"context"
-	"crypto/tls"
 	"fmt"
 	"net"
 	"sync"
@@ -10,6 +9,7 @@ import (
 	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
 	"github.com/VictoriaMetrics/fasthttp"
 	"github.com/VictoriaMetrics/metrics"
@@ -49,8 +49,8 @@ var (
 	stdDialerOnce sync.Once
 )

-func newStatDialFunc(proxyURL proxy.URL, tlsConfig *tls.Config) (fasthttp.DialFunc, error) {
-	dialFunc, err := proxyURL.NewDialFunc(tlsConfig)
+func newStatDialFunc(proxyURL proxy.URL, ac *promauth.Config) (fasthttp.DialFunc, error) {
+	dialFunc, err := proxyURL.NewDialFunc(ac)
 	if err != nil {
 		return nil, err
 	}
diff --git a/lib/proxy/proxy.go b/lib/proxy/proxy.go
index bb3c339ef..0bc87398a 100644
--- a/lib/proxy/proxy.go
+++ b/lib/proxy/proxy.go
@@ -7,9 +7,11 @@ import (
 	"fmt"
 	"net"
 	"net/url"
+	"strings"
 	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
 	"github.com/VictoriaMetrics/fasthttp"
 )

@@ -48,8 +50,8 @@ func (u *URL) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	return nil
 }

-// NewDialFunc returns dial func for the given pu and tlsConfig.
-func (u *URL) NewDialFunc(tlsConfig *tls.Config) (fasthttp.DialFunc, error) {
+// NewDialFunc returns dial func for the given u and ac.
+func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) {
 	if u == nil || u.url == nil {
 		return defaultDialFunc, nil
 	}
@@ -57,18 +59,32 @@ func (u *URL) NewDialFunc(tlsConfig *tls.Config) (fasthttp.DialFunc, error) {
 	if pu.Scheme != "http" && pu.Scheme != "https" {
 		return nil, fmt.Errorf("unknown scheme=%q for proxy_url=%q, must be http or https", pu.Scheme, pu)
 	}
+	isTLS := pu.Scheme == "https"
+	proxyAddr := addMissingPort(pu.Host, isTLS)
 	var authHeader string
+	if ac != nil {
+		authHeader = ac.Authorization
+	}
 	if pu.User != nil && len(pu.User.Username()) > 0 {
 		userPasswordEncoded := base64.StdEncoding.EncodeToString([]byte(pu.User.String()))
-		authHeader = "Proxy-Authorization: Basic " + userPasswordEncoded + "\r\n"
+		authHeader = "Basic " + userPasswordEncoded
 	}
+	if authHeader != "" {
+		authHeader = "Proxy-Authorization: " + authHeader + "\r\n"
+	}
+	tlsCfg := ac.NewTLSConfig()
 	dialFunc := func(addr string) (net.Conn, error) {
-		proxyConn, err := defaultDialFunc(pu.Host)
+		proxyConn, err := defaultDialFunc(proxyAddr)
 		if err != nil {
 			return nil, fmt.Errorf("cannot connect to proxy %q: %w", pu, err)
 		}
-		if pu.Scheme == "https" {
-			proxyConn = tls.Client(proxyConn, tlsConfig)
+		if isTLS {
+			tlsCfgLocal := tlsCfg
+			if !tlsCfgLocal.InsecureSkipVerify && tlsCfgLocal.ServerName == "" {
+				tlsCfgLocal = tlsCfgLocal.Clone()
+				tlsCfgLocal.ServerName = tlsServerName(addr)
+			}
+			proxyConn = tls.Client(proxyConn, tlsCfgLocal)
 		}
 		conn, err := sendConnectRequest(proxyConn, addr, authHeader)
 		if err != nil {
@@ -80,6 +96,25 @@ func (u *URL) NewDialFunc(tlsConfig *tls.Config) (fasthttp.DialFunc, error) {
 	return dialFunc, nil
 }

+func addMissingPort(addr string, isTLS bool) string {
+	if strings.IndexByte(addr, ':') >= 0 {
+		return addr
+	}
+	port := "80"
+	if isTLS {
+		port = "443"
+	}
+	return addr + ":" + port
+}
+
+func tlsServerName(addr string) string {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return addr
+	}
+	return host
+}
+
 func defaultDialFunc(addr string) (net.Conn, error) {
 	network := "tcp4"
 	if netutil.TCP6Enabled() {

From 787242d7b0b7664c9ba1a31375b268d7cb2d08de Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Tue, 9 Mar 2021 20:39:38 +0200
Subject: [PATCH 09/59] lib/proxy: pass proxy hostname in `Host` header of the `CONNECT` request

This should resolve the following issue when connecting to tls proxy:

  cannot validate certificate for ... because it doesn't contain any IP SANs
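Concretely, the change below makes the proxied handshake look like the following. A minimal sketch with placeholder addresses and credentials; `connectRequest` mirrors the string built in `sendConnectRequest` in the diff that follows:

```go
package main

import "fmt"

// connectRequest mirrors the request assembled in sendConnectRequest:
// the Request-URI names the final target, while the Host header now
// carries the proxy's own address instead of the target's.
func connectRequest(proxyAddr, dstAddr, authHeader string) string {
	return "CONNECT " + dstAddr + " HTTP/1.1\r\n" +
		"Host: " + proxyAddr + "\r\n" +
		authHeader + // either empty or a full "Proxy-Authorization: ...\r\n" line
		"\r\n"
}

func main() {
	// proxy.example.com and target.example.com are placeholders.
	fmt.Print(connectRequest("proxy.example.com:3128", "target.example.com:443",
		"Proxy-Authorization: Basic dXNlcjpwYXNz\r\n"))
	// Output:
	// CONNECT target.example.com:443 HTTP/1.1
	// Host: proxy.example.com:3128
	// Proxy-Authorization: Basic dXNlcjpwYXNz
}
```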
---
 lib/proxy/proxy.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lib/proxy/proxy.go b/lib/proxy/proxy.go
index 0bc87398a..0bb6f9e8d 100644
--- a/lib/proxy/proxy.go
+++ b/lib/proxy/proxy.go
@@ -86,7 +86,7 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) {
 			}
 			proxyConn = tls.Client(proxyConn, tlsCfgLocal)
 		}
-		conn, err := sendConnectRequest(proxyConn, addr, authHeader)
+		conn, err := sendConnectRequest(proxyConn, proxyAddr, addr, authHeader)
 		if err != nil {
 			_ = proxyConn.Close()
 			return nil, fmt.Errorf("error when sending CONNECT request to proxy %q: %w", pu, err)
@@ -125,8 +125,8 @@ func defaultDialFunc(addr string) (net.Conn, error) {
 }

 // sendConnectRequest sends CONNECT request to proxyConn for the given addr and authHeader and returns the established connection to dstAddr.
-func sendConnectRequest(proxyConn net.Conn, dstAddr, authHeader string) (net.Conn, error) {
-	req := "CONNECT " + dstAddr + " HTTP/1.1\r\nHost: " + dstAddr + "\r\n" + authHeader + "\r\n"
+func sendConnectRequest(proxyConn net.Conn, proxyAddr, dstAddr, authHeader string) (net.Conn, error) {
+	req := "CONNECT " + dstAddr + " HTTP/1.1\r\nHost: " + proxyAddr + "\r\n" + authHeader + "\r\n"
 	if _, err := proxyConn.Write([]byte(req)); err != nil {
 		return nil, fmt.Errorf("cannot send CONNECT request for dstAddr=%q: %w", dstAddr, err)
 	}

From 2728b25783067ac4ba53e2d06a58802b1a31abe0 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Tue, 9 Mar 2021 20:56:06 +0200
Subject: [PATCH 10/59] docs/CHANGELOG.md: mention about the bugfix from 787242d7b0b7664c9ba1a31375b268d7cb2d08de

---
 docs/CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index d15ff460d..fd48af9e8 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -15,6 +15,7 @@
 * BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in big number of distinct scrape config jobs by sharing Kubernetes object cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
 * BUGFIX: vmagent: apply `sample_limit` only after `metric_relabel_configs` are applied as Prometheus does. Previously the `sample_limit` was applied before metrics relabeling.
 * BUGFIX: vmagent: properly apply `tls_config`, `basic_auth` and `bearer_token` to proxy connections if `proxy_url` option is set. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1116
+* BUGFIX: vmagent: properly scrape targets via https proxy specified in `proxy_url` if `insecure_skip_verify` flag isn't set in `tls_config` section. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1116
 * BUGFIX: avoid `duplicate time series` error if `prometheus_buckets()` covers a time range with a distinct set of buckets.
 * BUGFIX: prevent exponent overflow when processing extremely small values close to zero such as `2.964393875E-314`.
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1114 From 48fb0d1c4bd1ebaad55951d8ac759de295ea32fe Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Tue, 9 Mar 2021 21:33:48 +0200 Subject: [PATCH 11/59] vendor: update github.com/VictoriaMetrics/fasthttp from v1.0.13 to v1.0.14 --- go.mod | 2 +- go.sum | 5 ++--- vendor/github.com/VictoriaMetrics/fasthttp/go.mod | 2 +- vendor/github.com/VictoriaMetrics/fasthttp/go.sum | 4 ++-- vendor/modules.txt | 2 +- 5 files changed, 7 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index dd0ac52ac..f1721e98c 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( // Do not use the original github.com/valyala/fasthttp because of issues // like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b - github.com/VictoriaMetrics/fasthttp v1.0.13 + github.com/VictoriaMetrics/fasthttp v1.0.14 github.com/VictoriaMetrics/metrics v1.15.2 github.com/VictoriaMetrics/metricsql v0.14.0 github.com/aws/aws-sdk-go v1.37.26 diff --git a/go.sum b/go.sum index 7c821a6c5..0396ff841 100644 --- a/go.sum +++ b/go.sum @@ -82,8 +82,8 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VictoriaMetrics/fastcache v1.5.8 h1:XW+YVx9lEXITBVv35ugK9OyotdNJVcbza69o3jmqWuI= github.com/VictoriaMetrics/fastcache v1.5.8/go.mod h1:SiMZNgwEPJ9qWLshu9tyuE6bKc9ZWYhcNV/L7jurprQ= -github.com/VictoriaMetrics/fasthttp v1.0.13 h1:5JNS4vSPdN4QyfcpAg3Y1Wznf0uXEuSOFpeIlFw3MgM= -github.com/VictoriaMetrics/fasthttp v1.0.13/go.mod h1:3SeUL4zwB/p/a9aEeRc6gdlbrtNHXBJR6N376EgiSHU= +github.com/VictoriaMetrics/fasthttp v1.0.14 h1:iWCdHg7JQ1SO0xvPAgw3QFpFT3he+Ugdshg+1clN6CQ= +github.com/VictoriaMetrics/fasthttp v1.0.14/go.mod h1:eDVgYyGts3xXpYpVGDxQ3ZlQKW5TSvOqfc9FryjH1JA= github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE= github.com/VictoriaMetrics/metrics v1.15.2 h1:w/GD8L9tm+gvx1oZvAofRRXwammiicdI0jgLghA2Gdo= github.com/VictoriaMetrics/metrics v1.15.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE= @@ -507,7 +507,6 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/go.mod b/vendor/github.com/VictoriaMetrics/fasthttp/go.mod index e738de9b0..155f42daa 100644 --- a/vendor/github.com/VictoriaMetrics/fasthttp/go.mod +++ b/vendor/github.com/VictoriaMetrics/fasthttp/go.mod @@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/fasthttp go 1.13 require ( - github.com/klauspost/compress v1.11.3 + github.com/klauspost/compress v1.11.12 github.com/valyala/bytebufferpool v1.0.0 github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a ) diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/go.sum 
b/vendor/github.com/VictoriaMetrics/fasthttp/go.sum index 6fc4f7e42..6b6e7262a 100644 --- a/vendor/github.com/VictoriaMetrics/fasthttp/go.sum +++ b/vendor/github.com/VictoriaMetrics/fasthttp/go.sum @@ -1,5 +1,5 @@ -github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8h3jc= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc= diff --git a/vendor/modules.txt b/vendor/modules.txt index fec7c5692..af86f5e9f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -13,7 +13,7 @@ cloud.google.com/go/storage # github.com/VictoriaMetrics/fastcache v1.5.8 ## explicit github.com/VictoriaMetrics/fastcache -# github.com/VictoriaMetrics/fasthttp v1.0.13 +# github.com/VictoriaMetrics/fasthttp v1.0.14 ## explicit github.com/VictoriaMetrics/fasthttp github.com/VictoriaMetrics/fasthttp/fasthttputil From d212f35ae830629e4cb40f56fb0e8fa864f2dfa7 Mon Sep 17 00:00:00 2001 From: Ihor Borodin Date: Tue, 9 Mar 2021 22:49:50 +0200 Subject: [PATCH 12/59] Fixing examples of external.alert.source in documentation (#1120) * Fixing examples of external.alert.source in documentation --- app/vmalert/README.md | 52 ++++++++++++++-------------- app/vmalert/main.go | 8 ++--- deployment/docker/docker-compose.yml | 3 +- docs/vmalert.md | 52 ++++++++++++++-------------- 4 files changed, 57 insertions(+), 58 deletions(-) diff --git a/app/vmalert/README.md b/app/vmalert/README.md index adfed9bf2..5a286c289 100644 --- a/app/vmalert/README.md +++ b/app/vmalert/README.md @@ -16,7 +16,7 @@ rules against configured address. * Lightweight without extra dependencies. ### Limitations: -* `vmalert` execute queries against remote datasource which has reliability risks because of network. +* `vmalert` execute queries against remote datasource which has reliability risks because of network. It is recommended to configure alerts thresholds and rules expressions with understanding that network request may fail; * by default, rules execution is sequential within one group, but persisting of execution results to remote @@ -37,7 +37,7 @@ The build binary will be placed to `VictoriaMetrics/bin` folder. To start using `vmalert` you will need the following things: * list of rules - PromQL/MetricsQL expressions to execute; * datasource address - reachable VictoriaMetrics instance for rules execution; -* notifier address - reachable [Alert Manager](https://github.com/prometheus/alertmanager) instance for processing, +* notifier address - reachable [Alert Manager](https://github.com/prometheus/alertmanager) instance for processing, aggregating alerts and sending notifications. * remote write address - [remote write](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations) compatible storage address for storing recording rules results and alerts state in for of timeseries. This is optional. 
@@ -56,11 +56,11 @@ Then configure `vmalert` accordingly: ``` If you run multiple `vmalert` services for the same datastore or AlertManager - do not forget -to specify different `external.label` flags in order to define which `vmalert` generated rules or alerts. +to specify different `external.label` flags in order to define which `vmalert` generated rules or alerts. -Configuration for [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) -and [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) rules is very -similar to Prometheus rules and configured using YAML. Configuration examples may be found +Configuration for [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) +and [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) rules is very +similar to Prometheus rules and configured using YAML. Configuration examples may be found in [testdata](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmalert/config/testdata) folder. Every `rule` belongs to `group` and every configuration file may contain arbitrary number of groups: ```yaml @@ -79,7 +79,7 @@ name: [ interval: | default = global.evaluation_interval ] # How many rules execute at once. Increasing concurrency may speed -# up round execution speed. +# up round execution speed. [ concurrency: | default = 1 ] # Optional type for expressions inside the rules. Supported values: "graphite" and "prometheus". @@ -93,15 +93,15 @@ rules: #### Rules There are two types of Rules: -* [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) - +* [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) - Alerting rules allows to define alert conditions via [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) and to send notifications about firing alerts to [Alertmanager](https://github.com/prometheus/alertmanager). -* [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) - -Recording rules allow you to precompute frequently needed or computationally expensive expressions +* [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) - +Recording rules allow you to precompute frequently needed or computationally expensive expressions and save their result as a new set of time series. `vmalert` forbids to define duplicates - rules with the same combination of name, expression and labels -within one group. +within one group. ##### Alerting rules @@ -130,7 +130,7 @@ labels: # Annotations to add to each alert. annotations: [ : ] -``` +``` ##### Recording rules @@ -158,17 +158,17 @@ For recording rules to work `-remoteWrite.url` must specified. #### Alerts state on restarts -`vmalert` has no local storage, so alerts state is stored in the process memory. Hence, after reloading of `vmalert` +`vmalert` has no local storage, so alerts state is stored in the process memory. Hence, after reloading of `vmalert` the process alerts state will be lost. To avoid this situation, `vmalert` should be configured via the following flags: -* `-remoteWrite.url` - URL to VictoriaMetrics (Single) or VMInsert (Cluster). `vmalert` will persist alerts state -into the configured address in the form of time series named `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol. -These are regular time series and may be queried from VM just as any other time series. 
+* `-remoteWrite.url` - URL to VictoriaMetrics (Single) or VMInsert (Cluster). `vmalert` will persist alerts state +into the configured address in the form of time series named `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol. +These are regular time series and may be queried from VM just as any other time series. The state stored to the configured address on every rule evaluation. -* `-remoteRead.url` - URL to VictoriaMetrics (Single) or VMSelect (Cluster). `vmalert` will try to restore alerts state +* `-remoteRead.url` - URL to VictoriaMetrics (Single) or VMSelect (Cluster). `vmalert` will try to restore alerts state from configured address by querying time series with name `ALERTS_FOR_STATE`. Both flags are required for the proper state restoring. Restore process may fail if time series are missing -in configured `-remoteRead.url`, weren't updated in the last `1h` or received state doesn't match current `vmalert` +in configured `-remoteRead.url`, weren't updated in the last `1h` or received state doesn't match current `vmalert` rules configuration. @@ -232,7 +232,7 @@ The shortlist of configuration flags is the following: How often to evaluate the rules (default 1m0s) -external.alert.source string External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. - eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|pathEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used + eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used -external.label array Optional label in the form 'name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets. Supports array of values separated by comma or specified via multiple flags. @@ -348,11 +348,11 @@ The shortlist of configuration flags is the following: -remoteWrite.url string Optional URL to Victoria Metrics or VMInsert where to persist alerts state and recording rules results in form of timeseries. E.g. http://127.0.0.1:8428 -rule array - Path to the file with alert rules. - Supports patterns. Flag can be specified multiple times. + Path to the file with alert rules. + Supports patterns. Flag can be specified multiple times. Examples: -rule="/path/to/file". Path to a single file with alerting rules - -rule="dir/*.yaml" -rule="/*.yaml". Relative path to all .yaml files in "dir" folder, + -rule="dir/*.yaml" -rule="/*.yaml". Relative path to all .yaml files in "dir" folder, absolute path to all .yaml files in root. Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars. Supports array of values separated by comma or specified via multiple flags. @@ -370,7 +370,7 @@ The shortlist of configuration flags is the following: Show VictoriaMetrics version ``` -Pass `-help` to `vmalert` in order to see the full list of supported +Pass `-help` to `vmalert` in order to see the full list of supported command-line flags with their descriptions. To reload configuration without `vmalert` restart send SIGHUP signal @@ -379,13 +379,13 @@ or send GET request to `/-/reload` endpoint. 
### Contributing `vmalert` is mostly designed and built by VictoriaMetrics community. -Feel free to share your experience and ideas for improving this +Feel free to share your experience and ideas for improving this software. Please keep simplicity as the main priority. ### How to build from sources -It is recommended using -[binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) +It is recommended using +[binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmalert` is located in `vmutils-*` archives there. diff --git a/app/vmalert/main.go b/app/vmalert/main.go index d04584eb2..baae921e3 100644 --- a/app/vmalert/main.go +++ b/app/vmalert/main.go @@ -26,11 +26,11 @@ import ( ) var ( - rulePath = flagutil.NewArray("rule", `Path to the file with alert rules. -Supports patterns. Flag can be specified multiple times. + rulePath = flagutil.NewArray("rule", `Path to the file with alert rules. +Supports patterns. Flag can be specified multiple times. Examples: -rule="/path/to/file". Path to a single file with alerting rules - -rule="dir/*.yaml" -rule="/*.yaml". Relative path to all .yaml files in "dir" folder, + -rule="dir/*.yaml" -rule="/*.yaml". Relative path to all .yaml files in "dir" folder, absolute path to all .yaml files in root. Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.`) @@ -41,7 +41,7 @@ Rule files may contain %{ENV_VAR} placeholders, which are substituted by the cor validateExpressions = flag.Bool("rule.validateExpressions", true, "Whether to validate rules expressions via MetricsQL engine") externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier") externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. -eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|pathEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used`) +eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used`) externalLabels = flagutil.NewArray("external.label", "Optional label in the form 'name=value' to add to all generated recording rules and alerts. 
"+ "Pass multiple -label flags in order to add multiple label sets.") diff --git a/deployment/docker/docker-compose.yml b/deployment/docker/docker-compose.yml index 736d3f4bd..31869646c 100644 --- a/deployment/docker/docker-compose.yml +++ b/deployment/docker/docker-compose.yml @@ -70,8 +70,7 @@ services: - '--rule=/etc/alerts/*.yml' # display source of alerts in grafana - '-external.url=http://127.0.0.1:3000' #grafana outside container - - '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":"{{$$expr|quotesEscape|pathEscape}}"},{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]' ## when copypaste the line be aware of '$$' for escaping in '$expr' - networks: + - '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":"{{$$expr|quotesEscape|crlfEscape|queryEscape}}"},{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]' ## when copypaste the line be aware of '$$' for escaping in '$expr' networks: - vm_net restart: always alertmanager: diff --git a/docs/vmalert.md b/docs/vmalert.md index adfed9bf2..5a286c289 100644 --- a/docs/vmalert.md +++ b/docs/vmalert.md @@ -16,7 +16,7 @@ rules against configured address. * Lightweight without extra dependencies. ### Limitations: -* `vmalert` execute queries against remote datasource which has reliability risks because of network. +* `vmalert` execute queries against remote datasource which has reliability risks because of network. It is recommended to configure alerts thresholds and rules expressions with understanding that network request may fail; * by default, rules execution is sequential within one group, but persisting of execution results to remote @@ -37,7 +37,7 @@ The build binary will be placed to `VictoriaMetrics/bin` folder. To start using `vmalert` you will need the following things: * list of rules - PromQL/MetricsQL expressions to execute; * datasource address - reachable VictoriaMetrics instance for rules execution; -* notifier address - reachable [Alert Manager](https://github.com/prometheus/alertmanager) instance for processing, +* notifier address - reachable [Alert Manager](https://github.com/prometheus/alertmanager) instance for processing, aggregating alerts and sending notifications. * remote write address - [remote write](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations) compatible storage address for storing recording rules results and alerts state in for of timeseries. This is optional. @@ -56,11 +56,11 @@ Then configure `vmalert` accordingly: ``` If you run multiple `vmalert` services for the same datastore or AlertManager - do not forget -to specify different `external.label` flags in order to define which `vmalert` generated rules or alerts. +to specify different `external.label` flags in order to define which `vmalert` generated rules or alerts. -Configuration for [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) -and [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) rules is very -similar to Prometheus rules and configured using YAML. Configuration examples may be found +Configuration for [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) +and [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) rules is very +similar to Prometheus rules and configured using YAML. 
Configuration examples may be found in the [testdata](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmalert/config/testdata) folder. Every `rule` belongs to a `group`, and every configuration file may contain an arbitrary number of groups: ```yaml @@ -79,7 +79,7 @@ name: <string> [ interval: <duration> | default = global.evaluation_interval ] # How many rules to execute at once. Increasing concurrency may speed -# up the round execution. +# up the round execution. [ concurrency: <integer> | default = 1 ] # Optional type for expressions inside the rules. Supported values: "graphite" and "prometheus". @@ -93,15 +93,15 @@ rules: #### Rules There are two types of Rules: -* [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) - +* [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) - Alerting rules allow defining alert conditions via [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) and sending notifications about firing alerts to [Alertmanager](https://github.com/prometheus/alertmanager). -* [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) - -Recording rules allow you to precompute frequently needed or computationally expensive expressions +* [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) - +Recording rules allow you to precompute frequently needed or computationally expensive expressions and save their result as a new set of time series. `vmalert` forbids defining duplicates - rules with the same combination of name, expression and labels -within one group. +within one group. ##### Alerting rules @@ -130,7 +130,7 @@ labels: # Annotations to add to each alert. annotations: [ <labelname>: <tmpl_string> ] -``` ##### Recording rules @@ -158,17 +158,17 @@ For recording rules to work, `-remoteWrite.url` must be specified. #### Alerts state on restarts -`vmalert` has no local storage, so alerts state is stored in the process memory. Hence, after restart of the `vmalert` +`vmalert` has no local storage, so alerts state is stored in the process memory. Hence, after restart of the `vmalert` process, its alerts state will be lost. To avoid this situation, `vmalert` should be configured via the following flags: -* `-remoteWrite.url` - URL to VictoriaMetrics (Single) or VMInsert (Cluster). `vmalert` will persist alerts state -into the configured address in the form of time series named `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol. -These are regular time series and may be queried from VM just as any other time series. +* `-remoteWrite.url` - URL to VictoriaMetrics (Single) or VMInsert (Cluster). `vmalert` will persist alerts state +into the configured address in the form of time series named `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol. +These are regular time series and may be queried from VM just as any other time series. The state is stored to the configured address on every rule evaluation. -* `-remoteRead.url` - URL to VictoriaMetrics (Single) or VMSelect (Cluster). `vmalert` will try to restore alerts state +* `-remoteRead.url` - URL to VictoriaMetrics (Single) or VMSelect (Cluster). `vmalert` will try to restore alerts state from the configured address by querying time series with the name `ALERTS_FOR_STATE`. Both flags are required for proper state restoration.
The restore process may fail if time series are missing -in the configured `-remoteRead.url`, weren't updated in the last `1h`, or the received state doesn't match the current `vmalert` +in the configured `-remoteRead.url`, weren't updated in the last `1h`, or the received state doesn't match the current `vmalert` rules configuration. @@ -232,7 +232,7 @@ The shortlist of configuration flags is the following: How often to evaluate the rules (default 1m0s) -external.alert.source string External Alert Source allows overriding the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. - e.g. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|pathEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'. If empty, '/api/v1/:groupID/alertID/status' is used + e.g. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'. If empty, '/api/v1/:groupID/alertID/status' is used -external.label array Optional label in the form 'name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets. Supports array of values separated by comma or specified via multiple flags. @@ -348,11 +348,11 @@ The shortlist of configuration flags is the following: -remoteWrite.url string Optional URL to Victoria Metrics or VMInsert where to persist alerts state and recording rules results in the form of time series. E.g. http://127.0.0.1:8428 -rule array - Path to the file with alert rules. - Supports patterns. Flag can be specified multiple times. + Path to the file with alert rules. + Supports patterns. Flag can be specified multiple times. Examples: -rule="/path/to/file". Path to a single file with alerting rules - -rule="dir/*.yaml" -rule="/*.yaml". Relative path to all .yaml files in "dir" folder, + -rule="dir/*.yaml" -rule="/*.yaml". Relative path to all .yaml files in "dir" folder, absolute path to all .yaml files in root. Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars. Supports array of values separated by comma or specified via multiple flags. @@ -370,7 +370,7 @@ The shortlist of configuration flags is the following: Show VictoriaMetrics version ``` -Pass `-help` to `vmalert` in order to see the full list of supported +Pass `-help` to `vmalert` in order to see the full list of supported command-line flags with their descriptions. To reload configuration without `vmalert` restart, send a SIGHUP signal @@ -379,13 +379,13 @@ or send GET request to `/-/reload` endpoint. ### Contributing `vmalert` is mostly designed and built by the VictoriaMetrics community. -Feel free to share your experience and ideas for improving this +Feel free to share your experience and ideas for improving this software. Please keep simplicity as the main priority. ### How to build from sources -It is recommended to use -[binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) +It is recommended to use +[binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmalert` is located in `vmutils-*` archives there.
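The `/-/reload` endpoint mentioned in the vmalert docs above can be exercised with a few lines of Go. A minimal sketch, assuming a `vmalert` instance listening on `localhost:8880`; substitute whatever `-httpListenAddr` is set to in your deployment:

```go
// A hypothetical helper that asks a running vmalert instance to reload its
// rule files, equivalent to sending a SIGHUP signal to the process.
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// The address is an assumption; adjust to your -httpListenAddr value.
	resp, err := http.Get("http://localhost:8880/-/reload")
	if err != nil {
		log.Fatalf("reload request failed: %s", err)
	}
	defer resp.Body.Close()
	fmt.Printf("reload endpoint returned HTTP %d\n", resp.StatusCode)
}
```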
From 96dbe9bcbd25ee0fa3733bb26185504333e8cac6 Mon Sep 17 00:00:00 2001 From: Brensted <75306281+Brensted@users.noreply.github.com> Date: Wed, 10 Mar 2021 10:05:18 +0200 Subject: [PATCH 13/59] Add files via upload --- docs/BestPractices.md | 59 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 docs/BestPractices.md diff --git a/docs/BestPractices.md b/docs/BestPractices.md new file mode 100644 index 000000000..560decfed --- /dev/null +++ b/docs/BestPractices.md @@ -0,0 +1,59 @@ +# VM best practices + + +VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. It can be used as a long-term, remote storage for Prometheus which allows it to gather metrics from different systems and store them in a single location or separate them for different purposes (short-, long-term, responsibility zones etc). + +## Install Recommendation + There is no need to tune VictoriaMetrics because it uses reasonable defaults for command-line flags. These flags are automatically adjusted for the available CPU and RAM resources. + There is no need for Operating System tuning because VictoriaMetrics is optimized for default OS settings. The only option is to increase the limit on the number of open files in the OS, so Prometheus instances could establish more connections to VictoriaMetrics (65535 is a standard production value). + Filesystem Considerations + The recommended filesystem is ext4. If you plan to store more than 1TB of data on an ext4 partition or plan to extend it to more than 16TB, then the following options are recommended to pass to mkfs.ext4: + mkfs.ext4 ... -O 64bit,huge_file,extent -T huge + +## Operating System + When configuring VictoriaMetrics, the best practice is to use the latest Ubuntu OS version. + +## VictoriaMetrics Versions + Always update VictoriaMetrics instances in the environment to avoid version and build mismatch that will result in differences in performance and operational features. It is strongly recommended that you keep VictoriaMetrics in the environment up-to-date and install all VictoriaMetrics updates as soon as they are available. The best place to find the most recent updates as soon as they are available is to follow this link. + +## Upgrade + It is safe to upgrade VictoriaMetrics to new versions unless the release notes say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features. + It is also safe to downgrade to the previous version unless release notes say otherwise. + The following steps must be performed during the upgrade / downgrade process: + Send SIGINT signal to VictoriaMetrics process so that it is stopped gracefully. + Wait until the process stops. This can take a few seconds. + Start the upgraded VictoriaMetrics. + Prometheus doesn't drop data during the VictoriaMetrics restart. See this article for details. + +## Security + Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing them to untrusted networks such as the internet. Please consider setting the following command-line flags: + -tls, -tlsCertFile and -tlsKeyFile for switching from HTTP to HTTPS. + -httpAuth.username and -httpAuth.password for protecting all the HTTP endpoints with HTTP Basic Authentication. + -deleteAuthKey for protecting /api/v1/admin/tsdb/delete_series endpoint. See how to delete time series. + -snapshotAuthKey for protecting /snapshot* endpoints. See how to work with snapshots. + -forceMergeAuthKey for protecting /internal/force_merge endpoint. See force merge docs. + -search.resetCacheAuthKey for protecting /internal/resetRollupResultCache endpoint. See backfilling for more details. + Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats. For example, substitute -graphiteListenAddr=:2003 with -graphiteListenAddr=<internal_iface_ip>:2003. + It is preferable to authorize all incoming requests from untrusted networks with vmauth or a similar auth proxy. + +## Backup Recommendations + VictoriaMetrics supports backups via vmbackup and vmrestore tools. We also provide the vmbackuper tool for our paid, enterprise subscribers - see this issue for additional details. + +## Networking + Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via Prometheus remote_write API. The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size of label values. A higher number of per-metric labels and longer label values result in higher ingress bandwidth. + +## Storage Considerations + Storage space: VictoriaMetrics needs less than a byte per data point on average. So, ~260GB is required to store a month-long insert stream of 100K data points per second. The actual storage size depends largely on data randomness (entropy). Higher randomness means higher storage size requirements. Read this article for details. + +## RAM + RAM size: VictoriaMetrics needs less than 1KB per active time series. Therefore, ~1GB of RAM is required for 1M active time series. Time series are considered active if new data points have been added recently or if they have been recently queried. The number of active time series may be obtained from vm_cache_entries{type="storage/hour_metric_ids"} metric exported on the /metrics page. VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited with -memory.allowedPercent or -memory.allowedBytes flags. + +## CPU + CPU cores: VictoriaMetrics needs one CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing the insert stream of 1M data points per second. The ingestion rate may be lower for high cardinality data or for time series with a high number of labels. See this article for details. If you see lower numbers per CPU core, it is likely that the active time series info doesn't fit in your caches and you will need more RAM to lower CPU usage. + +## Technical Support and Services + If you have questions about installing or using this software please check this and other documents first. Answers to the most frequently asked questions can be found on the Technical Papers webpage or in VictoriaMetrics community channels. If you need further assistance with VictoriaMetrics, please contact us at info@victoriametrics.com - we’ll be happy to help. + +Following VictoriaMetrics best practices allows for the optimal configuration of our fast and scalable monitoring solution and time series database while minimizing or avoiding downtime or performance issues during installation and software usage. Our best practices also allow you to quickly troubleshoot any issues that might arise.
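For the Security section above, here is a short sketch of how a client talks to an instance protected with `-httpAuth.username` and `-httpAuth.password`. The address, endpoint choice and credentials are placeholders, not values from the patch:

```go
// A minimal sketch of querying a Basic-Auth-protected VictoriaMetrics
// endpoint. Replace the placeholder credentials with the values of the
// -httpAuth.username and -httpAuth.password flags in your deployment.
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://localhost:8428/api/v1/labels", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("monitoring", "secret") // placeholder credentials
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatalf("request failed: %s", err)
	}
	defer resp.Body.Close()
	fmt.Printf("HTTP %d\n", resp.StatusCode)
}
```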
+ + From 09dc49e942d021d202b9ac5c7af8d5038038ac3c Mon Sep 17 00:00:00 2001 From: Brensted <75306281+Brensted@users.noreply.github.com> Date: Wed, 10 Mar 2021 10:06:49 +0200 Subject: [PATCH 14/59] Add files via upload --- docs/BestPractices.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/BestPractices.md b/docs/BestPractices.md index 560decfed..233a1a603 100644 --- a/docs/BestPractices.md +++ b/docs/BestPractices.md @@ -4,20 +4,20 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. It can be used as a long-term, remote storage for Prometheus which allows it to gather metrics from different systems and store them in a single location or separate them for different purposes (short-, long-term, responsibility zones etc). ## Install Recommendation - There is no need to tune VictoriaMetrics because it uses reasonable defaults for command-line flags. These flags are automatically adjusted for the available CPU and RAM resources. + There is no need to tune VictoriaMetrics because it uses reasonable defaults for command-line flags. These flags are automatically adjusted for the available CPU and RAM resources. There is no need for Operating System tuning because VictoriaMetrics is optimized for default OS settings. The only option is to increase the limit on the number of open files in the OS, so Prometheus instances could establish more connections to VictoriaMetrics (65535 is a standard production value). Filesystem Considerations The recommended filesystem is ext4. If you plan to store more than 1TB of data on an ext4 partition or plan to extend it to more than 16TB, then the following options are recommended to pass to mkfs.ext4: mkfs.ext4 ... -O 64bit,huge_file,extent -T huge ## Operating System - When configuring VictoriaMetrics, the best practice is to use the latest Ubuntu OS version. + When configuring VictoriaMetrics, the best practice is to use the latest Ubuntu OS version. ## VictoriaMetrics Versions - Always update VictoriaMetrics instances in the environment to avoid version and build mismatch that will result in differences in performance and operational features. It is strongly recommended that you keep VictoriaMetrics in the environment up-to-date and install all VictoriaMetrics updates as soon as they are available. The best place to find the most recent updates as soon as they are available is to follow this link. + Always update VictoriaMetrics instances in the environment to avoid version and build mismatch that will result in differences in performance and operational features. It is strongly recommended that you keep VictoriaMetrics in the environment up-to-date and install all VictoriaMetrics updates as soon as they are available. The best place to find the most recent updates as soon as they are available is to follow this link. ## Upgrade - It is safe to upgrade VictoriaMetrics to new versions unless the release notes say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features. + It is safe to upgrade VictoriaMetrics to new versions unless the release notes say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features. It is also safe to downgrade to the previous version unless release notes say otherwise. The following steps must be performed during the upgrade / downgrade process: Send SIGINT signal to VictoriaMetrics process so that it is stopped gracefully. @@ -26,33 +26,33 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t Prometheus doesn't drop data during the VictoriaMetrics restart. See this article for details. ## Security - Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing them to untrusted networks such as the internet. Please consider setting the following command-line flags: + Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing them to untrusted networks such as the internet. Please consider setting the following command-line flags: -tls, -tlsCertFile and -tlsKeyFile for switching from HTTP to HTTPS. -httpAuth.username and -httpAuth.password for protecting all the HTTP endpoints with HTTP Basic Authentication. -deleteAuthKey for protecting /api/v1/admin/tsdb/delete_series endpoint. See how to delete time series. -snapshotAuthKey for protecting /snapshot* endpoints. See how to work with snapshots. -forceMergeAuthKey for protecting /internal/force_merge endpoint. See force merge docs. -search.resetCacheAuthKey for protecting /internal/resetRollupResultCache endpoint. See backfilling for more details. - Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats. For example, substitute -graphiteListenAddr=:2003 with -graphiteListenAddr=<internal_iface_ip>:2003. + Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats. For example, substitute -graphiteListenAddr=:2003 with -graphiteListenAddr=<internal_iface_ip>:2003. It is preferable to authorize all incoming requests from untrusted networks with vmauth or a similar auth proxy. ## Backup Recommendations - VictoriaMetrics supports backups via vmbackup and vmrestore tools. We also provide the vmbackuper tool for our paid, enterprise subscribers - see this issue for additional details. + VictoriaMetrics supports backups via vmbackup and vmrestore tools. We also provide the vmbackuper tool for our paid, enterprise subscribers - see this issue for additional details. ## Networking - Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via Prometheus remote_write API. The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size of label values. A higher number of per-metric labels and longer label values result in higher ingress bandwidth. + Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via Prometheus remote_write API. The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size of label values. A higher number of per-metric labels and longer label values result in higher ingress bandwidth. ## Storage Considerations - Storage space: VictoriaMetrics needs less than a byte per data point on average. So, ~260GB is required to store a month-long insert stream of 100K data points per second. The actual storage size depends largely on data randomness (entropy). Higher randomness means higher storage size requirements. Read this article for details. + Storage space: VictoriaMetrics needs less than a byte per data point on average. So, ~260GB is required to store a month-long insert stream of 100K data points per second. The actual storage size depends largely on data randomness (entropy). Higher randomness means higher storage size requirements. Read this article for details. ## RAM - RAM size: VictoriaMetrics needs less than 1KB per active time series. Therefore, ~1GB of RAM is required for 1M active time series. Time series are considered active if new data points have been added recently or if they have been recently queried. The number of active time series may be obtained from vm_cache_entries{type="storage/hour_metric_ids"} metric exported on the /metrics page. VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited with -memory.allowedPercent or -memory.allowedBytes flags. + RAM size: VictoriaMetrics needs less than 1KB per active time series. Therefore, ~1GB of RAM is required for 1M active time series. Time series are considered active if new data points have been added recently or if they have been recently queried. The number of active time series may be obtained from vm_cache_entries{type="storage/hour_metric_ids"} metric exported on the /metrics page. VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited with -memory.allowedPercent or -memory.allowedBytes flags. ## CPU - CPU cores: VictoriaMetrics needs one CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing the insert stream of 1M data points per second. The ingestion rate may be lower for high cardinality data or for time series with a high number of labels. See this article for details. If you see lower numbers per CPU core, it is likely that the active time series info doesn't fit in your caches and you will need more RAM to lower CPU usage. + CPU cores: VictoriaMetrics needs one CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing the insert stream of 1M data points per second. The ingestion rate may be lower for high cardinality data or for time series with a high number of labels. See this article for details. If you see lower numbers per CPU core, it is likely that the active time series info doesn't fit in your caches and you will need more RAM to lower CPU usage. ## Technical Support and Services - If you have questions about installing or using this software please check this and other documents first. Answers to the most frequently asked questions can be found on the Technical Papers webpage or in VictoriaMetrics community channels. If you need further assistance with VictoriaMetrics, please contact us at info@victoriametrics.com - we’ll be happy to help. + If you have questions about installing or using this software please check this and other documents first. Answers to the most frequently asked questions can be found on the Technical Papers webpage or in VictoriaMetrics community channels. If you need further assistance with VictoriaMetrics, please contact us at info@victoriametrics.com - we’ll be happy to help. Following VictoriaMetrics best practices allows for the optimal configuration of our fast and scalable monitoring solution and time series database while minimizing or avoiding downtime or performance issues during installation and software usage. Our best practices also allow you to quickly troubleshoot any issues that might arise.
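The Networking, Storage, RAM and CPU rules of thumb quoted in the document above lend themselves to a quick worked example. A back-of-the-envelope sketch; the workload numbers are hypothetical inputs, not recommendations:

```go
// Sizing estimates from the stated rules of thumb: <1KB of RAM per active
// time series, <1 byte of storage per data point, ~300K inserted data
// points per second per CPU core.
package main

import "fmt"

func main() {
	const (
		activeSeries    = 1_000_000 // hypothetical number of active time series
		pointsPerSecond = 100_000   // hypothetical ingestion rate
		retentionDays   = 30
	)
	ramGB := float64(activeSeries) * 1024 / 1e9                         // <=1KB per series
	storageGB := float64(pointsPerSecond) * 86400 * retentionDays / 1e9 // <=1 byte per point
	cpuCores := float64(pointsPerSecond) / 300_000                      // ~300K points/sec per core
	fmt.Printf("RAM: ~%.1f GB, storage for %d days: ~%.0f GB, CPU: ~%.2f cores\n",
		ramGB, retentionDays, storageGB, cpuCores)
}
```

With these inputs the storage estimate comes out at ~259GB, matching the "~260GB for a month-long stream of 100K data points per second" figure in the text.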
From 45ede6ba980191e375034c2625f4fc929d43f400 Mon Sep 17 00:00:00 2001 From: Brensted <75306281+Brensted@users.noreply.github.com> Date: Wed, 10 Mar 2021 10:08:38 +0200 Subject: [PATCH 15/59] Add files via upload --- docs/BestPractices.md | 58 +++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/docs/BestPractices.md b/docs/BestPractices.md index 233a1a603..57c7f9984 100644 --- a/docs/BestPractices.md +++ b/docs/BestPractices.md @@ -4,55 +4,55 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. It can be used as a long-term, remote storage for Prometheus which allows it to gather metrics from different systems and store them in a single location or separate them for different purposes (short-, long-term, responsibility zones etc). ## Install Recommendation - There is no need to tune VictoriaMetrics because it uses reasonable defaults for command-line flags. These flags are automatically adjusted for the available CPU and RAM resources. - There is no need for Operating System tuning because VictoriaMetrics is optimized for default OS settings. The only option is to increase the limit on the number of open files in the OS, so Prometheus instances could establish more connections to VictoriaMetrics (65535 is a standard production value). - Filesystem Considerations - The recommended filesystem is ext4. If you plan to store more than 1TB of data on an ext4 partition or plan to extend it to more than 16TB, then the following options are recommended to pass to mkfs.ext4: - mkfs.ext4 ... -O 64bit,huge_file,extent -T huge + There is no need to tune VictoriaMetrics because it uses reasonable defaults for command-line flags. These flags are automatically adjusted for the available CPU and RAM resources. + There is no need for Operating System tuning because VictoriaMetrics is optimized for default OS settings. The only option is to increase the limit on the number of open files in the OS, so Prometheus instances could establish more connections to VictoriaMetrics (65535 is a standard production value). + Filesystem Considerations + The recommended filesystem is ext4. If you plan to store more than 1TB of data on an ext4 partition or plan to extend it to more than 16TB, then the following options are recommended to pass to mkfs.ext4: + mkfs.ext4 ... -O 64bit,huge_file,extent -T huge ## Operating System - When configuring VictoriaMetrics, the best practice is to use the latest Ubuntu OS version. + When configuring VictoriaMetrics, the best practice is to use the latest Ubuntu OS version. ## VictoriaMetrics Versions - Always update VictoriaMetrics instances in the environment to avoid version and build mismatch that will result in differences in performance and operational features. It is strongly recommended that you keep VictoriaMetrics in the environment up-to-date and install all VictoriaMetrics updates as soon as they are available. The best place to find the most recent updates as soon as they are available is to follow this link. + Always update VictoriaMetrics instances in the environment to avoid version and build mismatch that will result in differences in performance and operational features. It is strongly recommended that you keep VictoriaMetrics in the environment up-to-date and install all VictoriaMetrics updates as soon as they are available. The best place to find the most recent updates as soon as they are available is to follow this link. ## Upgrade - It is safe to upgrade VictoriaMetrics to new versions unless the release notes say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features. - It is also safe to downgrade to the previous version unless release notes say otherwise. - The following steps must be performed during the upgrade / downgrade process: - Send SIGINT signal to VictoriaMetrics process so that it is stopped gracefully. - Wait until the process stops. This can take a few seconds. - Start the upgraded VictoriaMetrics. - Prometheus doesn't drop data during the VictoriaMetrics restart. See this article for details. + It is safe to upgrade VictoriaMetrics to new versions unless the release notes say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features. + It is also safe to downgrade to the previous version unless release notes say otherwise. + The following steps must be performed during the upgrade / downgrade process: + Send SIGINT signal to VictoriaMetrics process so that it is stopped gracefully. + Wait until the process stops. This can take a few seconds. + Start the upgraded VictoriaMetrics. + Prometheus doesn't drop data during the VictoriaMetrics restart. See this article for details. ## Security - Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing them to untrusted networks such as the internet. Please consider setting the following command-line flags: - -tls, -tlsCertFile and -tlsKeyFile for switching from HTTP to HTTPS. - -httpAuth.username and -httpAuth.password for protecting all the HTTP endpoints with HTTP Basic Authentication. - -deleteAuthKey for protecting /api/v1/admin/tsdb/delete_series endpoint. See how to delete time series. - -snapshotAuthKey for protecting /snapshot* endpoints. See how to work with snapshots. - -forceMergeAuthKey for protecting /internal/force_merge endpoint. See force merge docs. - -search.resetCacheAuthKey for protecting /internal/resetRollupResultCache endpoint. See backfilling for more details. - Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats. For example, substitute -graphiteListenAddr=:2003 with -graphiteListenAddr=<internal_iface_ip>:2003. - It is preferable to authorize all incoming requests from untrusted networks with vmauth or a similar auth proxy. + Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing them to untrusted networks such as the internet. Please consider setting the following command-line flags: + -tls, -tlsCertFile and -tlsKeyFile for switching from HTTP to HTTPS. + -httpAuth.username and -httpAuth.password for protecting all the HTTP endpoints with HTTP Basic Authentication. + -deleteAuthKey for protecting /api/v1/admin/tsdb/delete_series endpoint. See how to delete time series. + -snapshotAuthKey for protecting /snapshot* endpoints. See how to work with snapshots. + -forceMergeAuthKey for protecting /internal/force_merge endpoint. See force merge docs. + -search.resetCacheAuthKey for protecting /internal/resetRollupResultCache endpoint. See backfilling for more details. + Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats. For example, substitute -graphiteListenAddr=:2003 with -graphiteListenAddr=<internal_iface_ip>:2003. + It is preferable to authorize all incoming requests from untrusted networks with vmauth or a similar auth proxy. ## Backup Recommendations - VictoriaMetrics supports backups via vmbackup and vmrestore tools. We also provide the vmbackuper tool for our paid, enterprise subscribers - see this issue for additional details. + VictoriaMetrics supports backups via vmbackup and vmrestore tools. We also provide the vmbackuper tool for our paid, enterprise subscribers - see this issue for additional details. ## Networking - Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via Prometheus remote_write API. The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size of label values. A higher number of per-metric labels and longer label values result in higher ingress bandwidth. + Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via Prometheus remote_write API. The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size of label values. A higher number of per-metric labels and longer label values result in higher ingress bandwidth. ## Storage Considerations - Storage space: VictoriaMetrics needs less than a byte per data point on average. So, ~260GB is required to store a month-long insert stream of 100K data points per second. The actual storage size depends largely on data randomness (entropy). Higher randomness means higher storage size requirements. Read this article for details. + Storage space: VictoriaMetrics needs less than a byte per data point on average. So, ~260GB is required to store a month-long insert stream of 100K data points per second. The actual storage size depends largely on data randomness (entropy). Higher randomness means higher storage size requirements. Read this article for details. ## RAM - RAM size: VictoriaMetrics needs less than 1KB per active time series. Therefore, ~1GB of RAM is required for 1M active time series. Time series are considered active if new data points have been added recently or if they have been recently queried. The number of active time series may be obtained from vm_cache_entries{type="storage/hour_metric_ids"} metric exported on the /metrics page. VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited with -memory.allowedPercent or -memory.allowedBytes flags. + RAM size: VictoriaMetrics needs less than 1KB per active time series. Therefore, ~1GB of RAM is required for 1M active time series. Time series are considered active if new data points have been added recently or if they have been recently queried. The number of active time series may be obtained from vm_cache_entries{type="storage/hour_metric_ids"} metric exported on the /metrics page. VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited with -memory.allowedPercent or -memory.allowedBytes flags. ## CPU - CPU cores: VictoriaMetrics needs one CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing the insert stream of 1M data points per second. The ingestion rate may be lower for high cardinality data or for time series with a high number of labels. See this article for details. If you see lower numbers per CPU core, it is likely that the active time series info doesn't fit in your caches and you will need more RAM to lower CPU usage. + CPU cores: VictoriaMetrics needs one CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing the insert stream of 1M data points per second. The ingestion rate may be lower for high cardinality data or for time series with a high number of labels. See this article for details. If you see lower numbers per CPU core, it is likely that the active time series info doesn't fit in your caches and you will need more RAM to lower CPU usage. ## Technical Support and Services - If you have questions about installing or using this software please check this and other documents first. Answers to the most frequently asked questions can be found on the Technical Papers webpage or in VictoriaMetrics community channels. If you need further assistance with VictoriaMetrics, please contact us at info@victoriametrics.com - we’ll be happy to help. + If you have questions about installing or using this software please check this and other documents first. Answers to the most frequently asked questions can be found on the Technical Papers webpage or in VictoriaMetrics community channels. If you need further assistance with VictoriaMetrics, please contact us at info@victoriametrics.com - we’ll be happy to help. Following VictoriaMetrics best practices allows for the optimal configuration of our fast and scalable monitoring solution and time series database while minimizing or avoiding downtime or performance issues during installation and software usage. Our best practices also allow you to quickly troubleshoot any issues that might arise. From 314f28fb385273e8c066758c8b70c1d1eeae0356 Mon Sep 17 00:00:00 2001 From: Brensted <75306281+Brensted@users.noreply.github.com> Date: Wed, 10 Mar 2021 10:14:34 +0200 Subject: [PATCH 16/59] Add files via upload --- docs/BestPractices.md | 58 +++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/docs/BestPractices.md b/docs/BestPractices.md index 57c7f9984..6d385ed4d 100644 --- a/docs/BestPractices.md +++ b/docs/BestPractices.md @@ -4,55 +4,55 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. It can be used as a long-term, remote storage for Prometheus which allows it to gather metrics from different systems and store them in a single location or separate them for different purposes (short-, long-term, responsibility zones etc). ## Install Recommendation - There is no need to tune VictoriaMetrics because it uses reasonable defaults for command-line flags. These flags are automatically adjusted for the available CPU and RAM resources. - There is no need for Operating System tuning because VictoriaMetrics is optimized for default OS settings. The only option is to increase the limit on the number of open files in the OS, so Prometheus instances could establish more connections to VictoriaMetrics (65535 is a standard production value). - Filesystem Considerations - The recommended filesystem is ext4. If you plan to store more than 1TB of data on an ext4 partition or plan to extend it to more than 16TB, then the following options are recommended to pass to mkfs.ext4: - mkfs.ext4 ... -O 64bit,huge_file,extent -T huge + There is no need to tune VictoriaMetrics because it uses reasonable defaults for command-line flags.
These flags are automatically adjusted for the available CPU and RAM resources. There is no need for Operating System tuning because VictoriaMetrics is optimized for default OS settings. The only option is to increase the limit on the number of open files in the OS, so Prometheus instances could establish more connections to VictoriaMetrics (65535 is a standard production value). +## Filesystem Considerations + + The recommended filesystem is ext4. If you plan to store more than 1TB of data on an ext4 partition or plan to extend it to more than 16TB, then the following options are recommended to pass to mkfs.ext4: +mkfs.ext4 ... -O 64bit,huge_file,extent -T huge ## Operating System - When configuring VictoriaMetrics, the best practice is to use the latest Ubuntu OS version. + When configuring VictoriaMetrics, the best practice is to use the latest Ubuntu OS version. ## VictoriaMetrics Versions - Always update VictoriaMetrics instances in the environment to avoid version and build mismatch that will result in differences in performance and operational features. It is strongly recommended that you keep VictoriaMetrics in the environment up-to-date and install all VictoriaMetrics updates as soon as they are available. The best place to find the most recent updates as soon as they are available is to follow this link. + Always update VictoriaMetrics instances in the environment to avoid version and build mismatch that will result in differences in performance and operational features. It is strongly recommended that you keep VictoriaMetrics in the environment up-to-date and install all VictoriaMetrics updates as soon as they are available. The best place to find the most recent updates as soon as they are available is to follow this link. ## Upgrade - It is safe to upgrade VictoriaMetrics to new versions unless the release notes say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features. - It is also safe to downgrade to the previous version unless release notes say otherwise. - The following steps must be performed during the upgrade / downgrade process: - Send SIGINT signal to VictoriaMetrics process so that it is stopped gracefully. - Wait until the process stops. This can take a few seconds. - Start the upgraded VictoriaMetrics. - Prometheus doesn't drop data during the VictoriaMetrics restart. See this article for details. + It is safe to upgrade VictoriaMetrics to new versions unless the release notes say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features. +It is also safe to downgrade to the previous version unless release notes say otherwise. +The following steps must be performed during the upgrade / downgrade process: + -Send SIGINT signal to VictoriaMetrics process so that it is stopped gracefully. + -Wait until the process stops. This can take a few seconds. + -Start the upgraded VictoriaMetrics. +Prometheus doesn't drop data during the VictoriaMetrics restart. See this article for details. ## Security - Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing them to untrusted networks such as the internet. Please consider setting the following command-line flags: - -tls, -tlsCertFile and -tlsKeyFile for switching from HTTP to HTTPS. - -httpAuth.username and -httpAuth.password for protecting all the HTTP endpoints with HTTP Basic Authentication. - -deleteAuthKey for protecting /api/v1/admin/tsdb/delete_series endpoint. See how to delete time series. - -snapshotAuthKey for protecting /snapshot* endpoints. See how to work with snapshots. - -forceMergeAuthKey for protecting /internal/force_merge endpoint. See force merge docs. - -search.resetCacheAuthKey for protecting /internal/resetRollupResultCache endpoint. See backfilling for more details. - Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats. For example, substitute -graphiteListenAddr=:2003 with -graphiteListenAddr=<internal_iface_ip>:2003. - It is preferable to authorize all incoming requests from untrusted networks with vmauth or a similar auth proxy. + Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing them to untrusted networks such as the internet. Please consider setting the following command-line flags: + -tls, -tlsCertFile and -tlsKeyFile for switching from HTTP to HTTPS. + -httpAuth.username and -httpAuth.password for protecting all the HTTP endpoints with HTTP Basic Authentication. + -deleteAuthKey for protecting /api/v1/admin/tsdb/delete_series endpoint. See how to delete time series. + -snapshotAuthKey for protecting /snapshot* endpoints. See how to work with snapshots. + -forceMergeAuthKey for protecting /internal/force_merge endpoint. See force merge docs. + -search.resetCacheAuthKey for protecting /internal/resetRollupResultCache endpoint. See backfilling for more details. + Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats. For example, substitute -graphiteListenAddr=:2003 with -graphiteListenAddr=<internal_iface_ip>:2003. +It is preferable to authorize all incoming requests from untrusted networks with vmauth or a similar auth proxy. ## Backup Recommendations - VictoriaMetrics supports backups via vmbackup and vmrestore tools. We also provide the vmbackuper tool for our paid, enterprise subscribers - see this issue for additional details. + VictoriaMetrics supports backups via vmbackup and vmrestore tools. We also provide the vmbackuper tool for our paid, enterprise subscribers - see this issue for additional details. ## Networking - Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via Prometheus remote_write API. The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size of label values. A higher number of per-metric labels and longer label values result in higher ingress bandwidth. + Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via Prometheus remote_write API. The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size of label values. A higher number of per-metric labels and longer label values result in higher ingress bandwidth. ## Storage Considerations - Storage space: VictoriaMetrics needs less than a byte per data point on average. So, ~260GB is required to store a month-long insert stream of 100K data points per second. The actual storage size depends largely on data randomness (entropy). Higher randomness means higher storage size requirements. Read this article for details. + Storage space: VictoriaMetrics needs less than a byte per data point on average. So, ~260GB is required to store a month-long insert stream of 100K data points per second. The actual storage size depends largely on data randomness (entropy). Higher randomness means higher storage size requirements. Read this article for details. ## RAM - RAM size: VictoriaMetrics needs less than 1KB per active time series. Therefore, ~1GB of RAM is required for 1M active time series. Time series are considered active if new data points have been added recently or if they have been recently queried. The number of active time series may be obtained from vm_cache_entries{type="storage/hour_metric_ids"} metric exported on the /metrics page. VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited with -memory.allowedPercent or -memory.allowedBytes flags. + RAM size: VictoriaMetrics needs less than 1KB per active time series. Therefore, ~1GB of RAM is required for 1M active time series. Time series are considered active if new data points have been added recently or if they have been recently queried. The number of active time series may be obtained from vm_cache_entries{type="storage/hour_metric_ids"} metric exported on the /metrics page. VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited with -memory.allowedPercent or -memory.allowedBytes flags. ## CPU - CPU cores: VictoriaMetrics needs one CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing the insert stream of 1M data points per second. The ingestion rate may be lower for high cardinality data or for time series with a high number of labels. See this article for details. If you see lower numbers per CPU core, it is likely that the active time series info doesn't fit in your caches and you will need more RAM to lower CPU usage. + CPU cores: VictoriaMetrics needs one CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing the insert stream of 1M data points per second. The ingestion rate may be lower for high cardinality data or for time series with a high number of labels. See this article for details. If you see lower numbers per CPU core, it is likely that the active time series info doesn't fit in your caches and you will need more RAM to lower CPU usage. ## Technical Support and Services - If you have questions about installing or using this software please check this and other documents first. Answers to the most frequently asked questions can be found on the Technical Papers webpage or in VictoriaMetrics community channels. If you need further assistance with VictoriaMetrics, please contact us at info@victoriametrics.com - we’ll be happy to help. + If you have questions about installing or using this software please check this and other documents first. Answers to the most frequently asked questions can be found on the Technical Papers webpage or in VictoriaMetrics community channels. If you need further assistance with VictoriaMetrics, please contact us at info@victoriametrics.com - we’ll be happy to help. Following VictoriaMetrics best practices allows for the optimal configuration of our fast and scalable monitoring solution and time series database while minimizing or avoiding downtime or performance issues during installation and software usage. Our best practices also allow you to quickly troubleshoot any issues that might arise.
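The RAM section above refers to the vm_cache_entries{type="storage/hour_metric_ids"} gauge exported on the /metrics page. A minimal sketch of reading it programmatically; the address assumes a single-node VictoriaMetrics on its default port 8428:

```go
// Fetch the /metrics page and print the gauge that tracks the number of
// active time series, as described in the RAM sizing guidance above.
package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	resp, err := http.Get("http://localhost:8428/metrics")
	if err != nil {
		log.Fatalf("cannot fetch /metrics: %s", err)
	}
	defer resp.Body.Close()
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		line := sc.Text()
		// This gauge approximates the number of active time series.
		if strings.HasPrefix(line, `vm_cache_entries{type="storage/hour_metric_ids"}`) {
			fmt.Println(line)
		}
	}
	if err := sc.Err(); err != nil {
		log.Fatalf("error reading response: %s", err)
	}
}
```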
+ + From e02265cfa7c64d3660c545bb272ab82b9f7fa618 Mon Sep 17 00:00:00 2001 From: Brensted <75306281+Brensted@users.noreply.github.com> Date: Wed, 10 Mar 2021 10:16:59 +0200 Subject: [PATCH 17/59] Add files via upload --- docs/BestPractices.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/BestPractices.md b/docs/BestPractices.md index 6d385ed4d..a603f7548 100644 --- a/docs/BestPractices.md +++ b/docs/BestPractices.md @@ -20,19 +20,23 @@ mkfs.ext4 ... -O 64bit,huge_file,extent -T huge It is safe to upgrade VictoriaMetrics to new versions unless the release notes say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features. It is also safe to downgrade to the previous version unless release notes say otherwise. The following steps must be performed during the upgrade / downgrade process: + -Send SIGINT signal to VictoriaMetrics process so that it is stopped gracefully. -Wait until the process stops. This can take a few seconds. -Start the upgraded VictoriaMetrics. + Prometheus doesn't drop data during the VictoriaMetrics restart. See this article for details. ## Security Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing them to untrusted networks such as the internet. Please consider setting the following command-line flags: + -tls, -tlsCertFile and -tlsKeyFile for switching from HTTP to HTTPS. -httpAuth.username and -httpAuth.password for protecting all the HTTP endpoints with HTTP Basic Authentication. -deleteAuthKey for protecting /api/v1/admin/tsdb/delete_series endpoint. See how to delete time series. -snapshotAuthKey for protecting /snapshot* endpoints. See how to work with snapshots. -forceMergeAuthKey for protecting /internal/force_merge endpoint. See force merge docs. -search.resetCacheAuthKey for protecting /internal/resetRollupResultCache endpoint. See backfilling for more details. + Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats. For example, substitute -graphiteListenAddr=:2003 with -graphiteListenAddr=<internal_iface_ip>:2003. It is preferable to authorize all incoming requests from untrusted networks with vmauth or a similar auth proxy. From bc5c4add89c82c557f7d0daedbef099bdd4658c5 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Wed, 10 Mar 2021 12:01:18 +0200 Subject: [PATCH 18/59] lib/httpserver: export `vm_available_memory_bytes` and `vm_available_cpu_cores` metrics These metrics are useful for tracking the available memory and CPU cores for VictoriaMetrics apps. --- docs/CHANGELOG.md | 1 + lib/httpserver/metrics.go | 3 +++ 2 files changed, 4 insertions(+) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index fd48af9e8..b574ded3d 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -7,6 +7,7 @@ - `histogram_stdvar(buckets)` - returns standard variance for the given buckets. - `histogram_stddev(buckets)` - returns standard deviation for the given buckets. * FEATURE: reduce median query duration by up to 2x. See https://github.com/VictoriaMetrics/VictoriaMetrics/commit/18fe0ff14bc78860c5569e2b70de1db78fac61be +* FEATURE: export `vm_available_memory_bytes` and `vm_available_cpu_cores` metrics, which show the amount of available RAM and the number of available CPU cores for VictoriaMetrics apps.
* FEATURE: vmagent: add ability to replicate scrape targets among `vmagent` instances in the cluster with `-promscrape.cluster.replicationFactor` command-line flag. See [these docs](https://victoriametrics.github.io/vmagent.html#scraping-big-number-of-targets). * FEATURE: vmagent: accept `scrape_offset` option at `scrape_config`. This option may be useful when scrapes must start at the specified offset of every scrape interval. See [these docs](https://victoriametrics.github.io/vmagent.html#troubleshooting) for details. * FEATURE: vmauth: allow using regexp paths in `url_map`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1112) for details. diff --git a/lib/httpserver/metrics.go b/lib/httpserver/metrics.go index 281bd0ba2..a14603710 100644 --- a/lib/httpserver/metrics.go +++ b/lib/httpserver/metrics.go @@ -12,6 +12,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup" "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/memory" "github.com/VictoriaMetrics/metrics" @@ -48,6 +49,8 @@ func writePrometheusMetrics(w io.Writer) { fmt.Fprintf(w, "vm_app_version{version=%q, short_version=%q} 1\n", buildinfo.Version, versionRe.FindString(buildinfo.Version)) fmt.Fprintf(w, "vm_allowed_memory_bytes %d\n", memory.Allowed()) + fmt.Fprintf(w, "vm_available_memory_bytes %d\n", memory.Allowed()+memory.Remaining()) + fmt.Fprintf(w, "vm_available_cpu_cores %d\n", cgroup.AvailableCPUs()) // Export start time and uptime in seconds fmt.Fprintf(w, "vm_app_start_timestamp %d\n", startTime.Unix()) From e772d1c920ba2bdb6cab4506ea826433b3ff44fb Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Wed, 10 Mar 2021 15:06:33 +0200 Subject: [PATCH 19/59] lib/promscrape/discovery/kubernetes: reduce load on Kubernetes API server by using watch bookmarks This allows continuing object watch from the last bookmark instead of reloading all the objects on watch errors or timeouts. See https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks --- lib/promscrape/discovery/kubernetes/api_watcher.go | 14 ++++++++------ .../discovery/kubernetes/common_types.go | 1 + lib/promscrape/discovery/kubernetes/endpoints.go | 4 ++++ .../discovery/kubernetes/endpointslices.go | 4 ++++ lib/promscrape/discovery/kubernetes/ingress.go | 4 ++++ lib/promscrape/discovery/kubernetes/node.go | 4 ++++ lib/promscrape/discovery/kubernetes/pod.go | 4 ++++ lib/promscrape/discovery/kubernetes/service.go | 4 ++++ 8 files changed, 33 insertions(+), 6 deletions(-) diff --git a/lib/promscrape/discovery/kubernetes/api_watcher.go b/lib/promscrape/discovery/kubernetes/api_watcher.go index b997f2832..17ad2d636 100644 --- a/lib/promscrape/discovery/kubernetes/api_watcher.go +++ b/lib/promscrape/discovery/kubernetes/api_watcher.go @@ -31,6 +31,7 @@ type WatchEvent struct { type object interface { key() string getTargetLabels(aw *apiWatcher) []map[string]string + resourceVersion() string } // parseObjectFunc must parse object from the given data.
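Before the remaining hunks of this patch, it is worth noting the wire format the new `resourceVersion()` accessor is ultimately fed from: a BOOKMARK watch event carries essentially only `metadata.resourceVersion`. A sketch of decoding such an event, with the sample payload modeled on the example in the Kubernetes api-concepts documentation:

```go
// Decode a BOOKMARK WatchEvent and extract the resourceVersion the watcher
// records. Go's case-insensitive JSON field matching maps "resourceVersion"
// onto ResourceVersion, the same convention the patch relies on.
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type watchEvent struct {
	Type   string
	Object struct {
		Metadata struct {
			ResourceVersion string
		}
	}
}

func main() {
	data := []byte(`{"type":"BOOKMARK","object":{"kind":"Pod","apiVersion":"v1","metadata":{"resourceVersion":"12746"}}}`)
	var we watchEvent
	if err := json.Unmarshal(data, &we); err != nil {
		log.Fatalf("cannot parse watch event: %s", err)
	}
	// On a bookmark, only the resourceVersion is recorded; no cached
	// objects change.
	fmt.Printf("type=%s resourceVersion=%s\n", we.Type, we.Object.Metadata.ResourceVersion)
}
```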
@@ -247,9 +248,9 @@ var reloadObjectsLocksByRole = map[string]*sync.Mutex{ "ingress": {}, } -func (uw *urlWatcher) resetResourceVersion() { +func (uw *urlWatcher) setResourceVersion(resourceVersion string) { uw.mu.Lock() - uw.resourceVersion = "" + uw.resourceVersion = resourceVersion uw.mu.Unlock() } @@ -339,7 +340,7 @@ func (uw *urlWatcher) watchForUpdates() { delimiter = "&" } timeoutSeconds := time.Duration(0.9 * float64(aw.client.Timeout)).Seconds() - apiURL += delimiter + "watch=1&timeoutSeconds=" + strconv.Itoa(int(timeoutSeconds)) + apiURL += delimiter + "watch=1&allowWatchBookmarks=true&timeoutSeconds=" + strconv.Itoa(int(timeoutSeconds)) for { if aw.needStop() { return @@ -356,7 +357,6 @@ func (uw *urlWatcher) watchForUpdates() { } logger.Errorf("error when performing a request to %q: %s", requestURL, err) backoffSleep() - uw.resetResourceVersion() continue } if resp.StatusCode != http.StatusOK { @@ -366,10 +366,10 @@ func (uw *urlWatcher) watchForUpdates() { if resp.StatusCode == 410 { // There is no need for sleep on 410 error. See https://kubernetes.io/docs/reference/using-api/api-concepts/#410-gone-responses backoffDelay = time.Second + uw.setResourceVersion("") } else { backoffSleep() } - uw.resetResourceVersion() continue } backoffDelay = time.Second @@ -383,7 +383,6 @@ func (uw *urlWatcher) watchForUpdates() { logger.Errorf("error when reading WatchEvent stream from %q: %s", requestURL, err) } backoffSleep() - uw.resetResourceVersion() continue } } @@ -420,6 +419,9 @@ func (uw *urlWatcher) readObjectUpdateStream(r io.Reader) error { uw.mu.Lock() delete(uw.swosByKey, key) uw.mu.Unlock() + case "BOOKMARK": + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks + uw.setResourceVersion(o.resourceVersion()) default: return fmt.Errorf("unexpected WatchEvent type %q for role %q", we.Type, uw.role) } diff --git a/lib/promscrape/discovery/kubernetes/common_types.go b/lib/promscrape/discovery/kubernetes/common_types.go index be93bbb4a..0fffe5df4 100644 --- a/lib/promscrape/discovery/kubernetes/common_types.go +++ b/lib/promscrape/discovery/kubernetes/common_types.go @@ -8,6 +8,7 @@ import ( // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectmeta-v1-meta type ObjectMeta struct { + ResourceVersion string Name string Namespace string UID string diff --git a/lib/promscrape/discovery/kubernetes/endpoints.go b/lib/promscrape/discovery/kubernetes/endpoints.go index 003cceca5..b3f8c2bff 100644 --- a/lib/promscrape/discovery/kubernetes/endpoints.go +++ b/lib/promscrape/discovery/kubernetes/endpoints.go @@ -47,6 +47,10 @@ type Endpoints struct { Subsets []EndpointSubset } +func (eps *Endpoints) resourceVersion() string { + return eps.Metadata.ResourceVersion +} + // EndpointSubset implements k8s endpoint subset. // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#endpointsubset-v1-core diff --git a/lib/promscrape/discovery/kubernetes/endpointslices.go b/lib/promscrape/discovery/kubernetes/endpointslices.go index cec87ce13..14629a819 100644 --- a/lib/promscrape/discovery/kubernetes/endpointslices.go +++ b/lib/promscrape/discovery/kubernetes/endpointslices.go @@ -161,6 +161,10 @@ type EndpointSlice struct { Ports []EndpointPort } +func (eps *EndpointSlice) resourceVersion() string { + return eps.Metadata.ResourceVersion +} + // Endpoint implements kubernetes object endpoint for endpoint slice. 
// https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#endpoint-v1beta1-discovery-k8s-io type Endpoint struct { diff --git a/lib/promscrape/discovery/kubernetes/ingress.go b/lib/promscrape/discovery/kubernetes/ingress.go index 38aef79d3..e71aa234f 100644 --- a/lib/promscrape/discovery/kubernetes/ingress.go +++ b/lib/promscrape/discovery/kubernetes/ingress.go @@ -45,6 +45,10 @@ type Ingress struct { Spec IngressSpec } +func (ig *Ingress) resourceVersion() string { + return ig.Metadata.ResourceVersion +} + // IngressSpec represents ingress spec in k8s. // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#ingressspec-v1beta1-extensions diff --git a/lib/promscrape/discovery/kubernetes/node.go b/lib/promscrape/discovery/kubernetes/node.go index 653a99e73..08ca03388 100644 --- a/lib/promscrape/discovery/kubernetes/node.go +++ b/lib/promscrape/discovery/kubernetes/node.go @@ -48,6 +48,10 @@ type Node struct { Status NodeStatus } +func (n *Node) resourceVersion() string { + return n.Metadata.ResourceVersion +} + // NodeStatus represents NodeStatus from k8s API. // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#nodestatus-v1-core diff --git a/lib/promscrape/discovery/kubernetes/pod.go b/lib/promscrape/discovery/kubernetes/pod.go index 80864d25e..041f17760 100644 --- a/lib/promscrape/discovery/kubernetes/pod.go +++ b/lib/promscrape/discovery/kubernetes/pod.go @@ -50,6 +50,10 @@ type Pod struct { Status PodStatus } +func (p *Pod) resourceVersion() string { + return p.Metadata.ResourceVersion +} + // PodSpec implements k8s pod spec. // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#podspec-v1-core diff --git a/lib/promscrape/discovery/kubernetes/service.go b/lib/promscrape/discovery/kubernetes/service.go index c129816e6..fe209281e 100644 --- a/lib/promscrape/discovery/kubernetes/service.go +++ b/lib/promscrape/discovery/kubernetes/service.go @@ -47,6 +47,10 @@ type Service struct { Spec ServiceSpec } +func (s *Service) resourceVersion() string { + return s.Metadata.ResourceVersion +} + // ServiceSpec is k8s service spec. // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#servicespec-v1-core From 238fe7d4e8f8200974638d16ac7cedd289aa699c Mon Sep 17 00:00:00 2001 From: Brensted <75306281+Brensted@users.noreply.github.com> Date: Thu, 11 Mar 2021 11:39:14 +0200 Subject: [PATCH 20/59] Update BestPractices.md (#1123) update lists, hyperlinks fixed. --- docs/BestPractices.md | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/docs/BestPractices.md b/docs/BestPractices.md index a603f7548..04cc8bb7b 100644 --- a/docs/BestPractices.md +++ b/docs/BestPractices.md @@ -1,10 +1,9 @@ # VM best practices - VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database. It can be used as a long-term, remote storage for Prometheus which allows it to gather metrics from different systems and store them in a single location or separate them for different purposes (short-, long-term, responsibility zones etc). ## Install Recommendation - There is no need to tune VictoriaMetrics because it uses reasonable defaults for command-line flags. These flags are automatically adjusted for the available CPU and RAM resources. There is no need for Operating System tuning because VictoriaMetrics is optimized for default OS settings. 
The only option is to increase the limit on the number of open files in the OS, so Prometheus instances could establish more connections to VictoriaMetrics (65535 standard production value). + There is no need to tune VictoriaMetrics because it uses reasonable defaults for command-line flags. These flags are automatically adjusted for the available CPU and RAM resources. There is no need for Operating System tuning because VictoriaMetrics is optimized for default OS settings. The only option is to increase the limit on the [number of open files in the OS](https://medium.com/@muhammadtriwibowo/set-permanently-ulimit-n-open-files-in-ubuntu-4d61064429a), so Prometheus instances could establish more connections to VictoriaMetrics (65535 standard production value). ## Filesystem Considerations The recommended filesystem is ext4. If you plan to store more than 1TB of data on ext4 partition or plan to extend it to more than 16TB, then the following options are recommended to pass to mkfs.ext4: @@ -14,46 +13,46 @@ mkfs.ext4 ... -O 64bit,huge_file,extent -T huge When configuring VictoriaMetrics, the best practice is to use the latest Ubuntu OS version. ## VictoriaMetrics Versions - Always update VictoriaMetrics instances in the environment to avoid version and build mismatch that will result in differences in performance and operational features. It is strongly recommended that you keep VictoriaMetrics in the environment up-to-date and install all VictoriaMetrics updates as soon as they are available. The best place to find the most recent updates as soon as they are available is to follow this link. + Always update VictoriaMetrics instances in the environment to avoid version and build mismatch that will result in differences in performance and operational features. It is strongly recommended that you keep VictoriaMetrics in the environment up-to-date and install all VictoriaMetrics updates as soon as they are available. The best place to find the most recent updates as soon as they are available is to follow [this link](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). ## Upgrade - It is safe to upgrade VictoriaMetrics to new versions unless the release notes say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features. + It is safe to upgrade VictoriaMetrics to new versions unless the [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is safe to skip multiple versions during the upgrade unless release notes say otherwise. It is recommended to perform regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features. It is also safe to downgrade to the previous version unless release notes say otherwise. The following steps must be performed during the upgrade / downgrade process: - -Send SIGINT signal to VictoriaMetrics process so that it is stopped gracefully. - -Wait until the process stops. This can take a few seconds. - -Start the upgraded VictoriaMetrics. + * Send SIGINT signal to VictoriaMetrics process so that it is stopped gracefully. + * Wait until the process stops. This can take a few seconds. + * Start the upgraded VictoriaMetrics. -Prometheus doesn't drop data during theVictoriaMetrics restart. See this article for details. 
+Prometheus doesn't drop data during the VictoriaMetrics restart. See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details.

 ## Security
 Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing them to untrusted networks such as the internet. Please consider setting the following command-line flags:
- -tls, -tlsCertFile and -tlsKeyFile for switching from HTTP to HTTPS.
- -httpAuth.username and -httpAuth.password for protecting all the HTTP endpoints with HTTP Basic Authentication.
- -deleteAuthKey for protecting /api/v1/admin/tsdb/delete_series endpoint. See how to delete time series.
- -snapshotAuthKey for protecting /snapshot* endpoints. See how to work with snapshots.
- -forceMergeAuthKey for protecting /internal/force_merge endpoint. See force merge docs.
- -search.resetCacheAuthKey for protecting /internal/resetRollupResultCache endpoint. See backfilling for more details.
+ * -tls, -tlsCertFile and -tlsKeyFile for switching from HTTP to HTTPS.
+ * -httpAuth.username and -httpAuth.password for protecting all the HTTP endpoints with [HTTP Basic Authentication](https://en.wikipedia.org/wiki/Basic_access_authentication).
+ * -deleteAuthKey for protecting /api/v1/admin/tsdb/delete_series endpoint. See how to [delete time series](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-delete-time-series).
+ * -snapshotAuthKey for protecting /snapshot* endpoints. See [how to work with snapshots](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-work-with-snapshots).
+ * -forceMergeAuthKey for protecting /internal/force_merge endpoint. See [force merge docs](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#forced-merge).
+ * -search.resetCacheAuthKey for protecting /internal/resetRollupResultCache endpoint. See [backfilling](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#backfilling) for more details.

 Explicitly set internal network interface to TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats. For example, substitute -graphiteListenAddr=:2003 with -graphiteListenAddr=<internal_iface_ip>:2003.

-It is preferable to authorize all incoming requests from untrusted networks with vmauth or a similar auth proxy.
+It is preferable to authorize all incoming requests from untrusted networks with [vmauth](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmauth/README.md) or a similar auth proxy.

 ## Backup Recommendations
- VictoriaMetrics supports backups via vmbackup and vmrestore tools. We also provide the vmbackuper tool for our paid, enterprise subscribers - see this issue for additional details.
+ VictoriaMetrics supports backups via [vmbackup](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmbackup/README.md) and [vmrestore](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmrestore/README.md) tools. We also provide the vmbackuper tool for our paid, enterprise subscribers - see [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for additional details.

 ## Networking
- Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via Prometheus remote_write API. The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size of label values. A higher number of per-metric labels and longer label values result in higher ingress bandwidth.
+ Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via [Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size of label values. A higher number of per-metric labels and longer label values result in higher ingress bandwidth.

 ## Storage Considerations
- Storage space: VictoriaMetrics needs less than a byte per data point on average. So, ~260GB is required to store a month-long insert stream of 100K data points per second. The actual storage size depends largely on data randomness (entropy). Higher randomness means higher storage size requirements. Read [this article](https://medium.com/faun/victoriametrics-achieving-better-compression-for-time-series-data-than-gorilla-317bc1f95932) for details.

 ## RAM
 RAM size: VictoriaMetrics needs less than 1KB per active time series. Therefore, ~1GB of RAM is required for 1M active time series. Time series are considered active if new data points have been added recently or if they have been recently queried. The number of active time series may be obtained from vm_cache_entries{type="storage/hour_metric_ids"} metric exported on the /metrics page. VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited with -memory.allowedPercent or -memory.allowedBytes flags.

 ## CPU
- CPU cores: VictoriaMetrics needs one CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing the insert stream of 1M data points per second. The ingestion rate may be lower for high cardinality data or for time series with a high number of labels. See [this article](https://valyala.medium.com/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) for details. If you see lower numbers per CPU core, it is likely that the active time series info doesn't fit in your caches and you will need more RAM to lower CPU usage.

 ## Technical Support and Services
 If you have questions about installing or using this software please check this and other documents first. Answers to the most frequently asked questions can be found on the Technical Papers webpage or in VictoriaMetrics community channels. If you need further assistance with VictoriaMetrics, please contact us at info@victoriametrics.com - we’ll be happy to help.
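The Storage, RAM and CPU rules above reduce to simple arithmetic, so a planned deployment can be sanity-checked in a few lines. The sketch below is a rough back-of-the-envelope calculation using only the constants quoted in this document; as noted above, actual requirements depend on data entropy, cardinality and label sizes.

package main

import "fmt"

func main() {
	const (
		pointsPerSec     = 100_000.0   // example ingestion rate from the Storage section
		activeSeries     = 1_000_000.0 // example active series count from the RAM section
		bytesPerPoint    = 1.0         // "less than a byte per data point" (upper bound)
		ramPerSeries     = 1024.0      // "less than 1KB per active time series" (upper bound)
		pointsPerCoreSec = 300_000.0   // "one CPU core per 300K inserted data points per second"
		secondsPerMonth  = 30 * 24 * 3600
	)
	fmt.Printf("disk per month: ~%.0f GB\n", pointsPerSec*secondsPerMonth*bytesPerPoint/1e9)
	fmt.Printf("RAM: ~%.1f GB\n", activeSeries*ramPerSeries/1e9)
	fmt.Printf("CPU cores: ~%.1f\n", pointsPerSec/pointsPerCoreSec)
}

It prints roughly 259GB of disk per month, about 1GB of RAM and well under one CPU core for the 100K points/sec, 1M active series example, matching the ~260GB and ~1GB figures in the text.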
From 971e3d83f7401e69465e2bbbc800f79a691ec9d9 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Thu, 11 Mar 2021 11:42:04 +0200
Subject: [PATCH 21/59] docs/ExtendedPromQL.md: remove outdated doc
---
 docs/ExtendedPromQL.md | 3 ---
 1 file changed, 3 deletions(-)
 delete mode 100644 docs/ExtendedPromQL.md

diff --git a/docs/ExtendedPromQL.md b/docs/ExtendedPromQL.md
deleted file mode 100644
index e268ac091..000000000
--- a/docs/ExtendedPromQL.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# MetricsQL
-
-The page has been moved to [MetricsQL](https://victoriametrics.github.io/MetricsQL.html).

From bebcb8130c17a3038e976d4d738eb964d21f28ec Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Thu, 11 Mar 2021 13:06:40 +0200
Subject: [PATCH 22/59] lib/promscrape/discovery/kubernetes: localize Bookmark parsing code

This is a follow-up for e772d1c920ba2bdb6cab4506ea826433b3ff44fb
---
 .../discovery/kubernetes/api_watcher.go | 38 +++++++++++++++----
 .../discovery/kubernetes/api_watcher_test.go | 12 ++++++
 .../discovery/kubernetes/common_types.go | 1 -
 .../discovery/kubernetes/endpoints.go | 4 --
 .../discovery/kubernetes/endpointslices.go | 4 --
 .../discovery/kubernetes/ingress.go | 4 --
 lib/promscrape/discovery/kubernetes/node.go | 4 --
 lib/promscrape/discovery/kubernetes/pod.go | 4 --
 .../discovery/kubernetes/service.go | 4 --
 9 files changed, 43 insertions(+), 32 deletions(-)

diff --git a/lib/promscrape/discovery/kubernetes/api_watcher.go b/lib/promscrape/discovery/kubernetes/api_watcher.go
index 17ad2d636..5d4a30273 100644
--- a/lib/promscrape/discovery/kubernetes/api_watcher.go
+++ b/lib/promscrape/discovery/kubernetes/api_watcher.go
@@ -31,7 +31,6 @@ type WatchEvent struct {
 type object interface {
 	key() string
 	getTargetLabels(aw *apiWatcher) []map[string]string
-	resourceVersion() string
 }

 // parseObjectFunc must parse object from the given data.
@@ -397,13 +396,13 @@ func (uw *urlWatcher) readObjectUpdateStream(r io.Reader) error {
 		if err := d.Decode(&we); err != nil {
 			return err
 		}
-		o, err := uw.parseObject(we.Object)
-		if err != nil {
-			return err
-		}
-		key := o.key()
 		switch we.Type {
 		case "ADDED", "MODIFIED":
+			o, err := uw.parseObject(we.Object)
+			if err != nil {
+				return err
+			}
+			key := o.key()
 			uw.objectsByKey.update(key, o)
 			labels := o.getTargetLabels(aw)
 			swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels)
@@ -415,19 +414,44 @@ func (uw *urlWatcher) readObjectUpdateStream(r io.Reader) error {
 			}
 			uw.mu.Unlock()
 		case "DELETED":
+			o, err := uw.parseObject(we.Object)
+			if err != nil {
+				return err
+			}
+			key := o.key()
 			uw.objectsByKey.remove(key)
 			uw.mu.Lock()
 			delete(uw.swosByKey, key)
 			uw.mu.Unlock()
 		case "BOOKMARK":
 			// See https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
-			uw.setResourceVersion(o.resourceVersion())
+			bm, err := parseBookmark(we.Object)
+			if err != nil {
+				return fmt.Errorf("cannot parse bookmark from %q: %w", we.Object, err)
+			}
+			uw.setResourceVersion(bm.Metadata.ResourceVersion)
 		default:
 			return fmt.Errorf("unexpected WatchEvent type %q for role %q", we.Type, uw.role)
 		}
 	}
}

+// Bookmark is a bookmark from Kubernetes Watch API.
+// See https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks +type Bookmark struct { + Metadata struct { + ResourceVersion string + } +} + +func parseBookmark(data []byte) (*Bookmark, error) { + var bm Bookmark + if err := json.Unmarshal(data, &bm); err != nil { + return nil, err + } + return &bm, nil +} + func getAPIPaths(role string, namespaces []string, selectors []Selector) []string { objectName := getObjectNameByRole(role) if objectName == "nodes" || len(namespaces) == 0 { diff --git a/lib/promscrape/discovery/kubernetes/api_watcher_test.go b/lib/promscrape/discovery/kubernetes/api_watcher_test.go index 3ce42961b..7d56ada08 100644 --- a/lib/promscrape/discovery/kubernetes/api_watcher_test.go +++ b/lib/promscrape/discovery/kubernetes/api_watcher_test.go @@ -160,3 +160,15 @@ func TestGetAPIPaths(t *testing.T) { "/apis/networking.k8s.io/v1beta1/namespaces/y/ingresses?labelSelector=cde%2Cbaaa&fieldSelector=abc", }) } + +func TestParseBookmark(t *testing.T) { + data := `{"kind": "Pod", "apiVersion": "v1", "metadata": {"resourceVersion": "12746"} }` + bm, err := parseBookmark([]byte(data)) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + expectedResourceVersion := "12746" + if bm.Metadata.ResourceVersion != expectedResourceVersion { + t.Fatalf("unexpected resourceVersion; got %q; want %q", bm.Metadata.ResourceVersion, expectedResourceVersion) + } +} diff --git a/lib/promscrape/discovery/kubernetes/common_types.go b/lib/promscrape/discovery/kubernetes/common_types.go index 0fffe5df4..be93bbb4a 100644 --- a/lib/promscrape/discovery/kubernetes/common_types.go +++ b/lib/promscrape/discovery/kubernetes/common_types.go @@ -8,7 +8,6 @@ import ( // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectmeta-v1-meta type ObjectMeta struct { - ResourceVersion string Name string Namespace string UID string diff --git a/lib/promscrape/discovery/kubernetes/endpoints.go b/lib/promscrape/discovery/kubernetes/endpoints.go index b3f8c2bff..003cceca5 100644 --- a/lib/promscrape/discovery/kubernetes/endpoints.go +++ b/lib/promscrape/discovery/kubernetes/endpoints.go @@ -47,10 +47,6 @@ type Endpoints struct { Subsets []EndpointSubset } -func (eps *Endpoints) resourceVersion() string { - return eps.Metadata.ResourceVersion -} - // EndpointSubset implements k8s endpoint subset. // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#endpointsubset-v1-core diff --git a/lib/promscrape/discovery/kubernetes/endpointslices.go b/lib/promscrape/discovery/kubernetes/endpointslices.go index 14629a819..cec87ce13 100644 --- a/lib/promscrape/discovery/kubernetes/endpointslices.go +++ b/lib/promscrape/discovery/kubernetes/endpointslices.go @@ -161,10 +161,6 @@ type EndpointSlice struct { Ports []EndpointPort } -func (eps *EndpointSlice) resourceVersion() string { - return eps.Metadata.ResourceVersion -} - // Endpoint implements kubernetes object endpoint for endpoint slice. 
// https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#endpoint-v1beta1-discovery-k8s-io type Endpoint struct { diff --git a/lib/promscrape/discovery/kubernetes/ingress.go b/lib/promscrape/discovery/kubernetes/ingress.go index e71aa234f..38aef79d3 100644 --- a/lib/promscrape/discovery/kubernetes/ingress.go +++ b/lib/promscrape/discovery/kubernetes/ingress.go @@ -45,10 +45,6 @@ type Ingress struct { Spec IngressSpec } -func (ig *Ingress) resourceVersion() string { - return ig.Metadata.ResourceVersion -} - // IngressSpec represents ingress spec in k8s. // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#ingressspec-v1beta1-extensions diff --git a/lib/promscrape/discovery/kubernetes/node.go b/lib/promscrape/discovery/kubernetes/node.go index 08ca03388..653a99e73 100644 --- a/lib/promscrape/discovery/kubernetes/node.go +++ b/lib/promscrape/discovery/kubernetes/node.go @@ -48,10 +48,6 @@ type Node struct { Status NodeStatus } -func (n *Node) resourceVersion() string { - return n.Metadata.ResourceVersion -} - // NodeStatus represents NodeStatus from k8s API. // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#nodestatus-v1-core diff --git a/lib/promscrape/discovery/kubernetes/pod.go b/lib/promscrape/discovery/kubernetes/pod.go index 041f17760..80864d25e 100644 --- a/lib/promscrape/discovery/kubernetes/pod.go +++ b/lib/promscrape/discovery/kubernetes/pod.go @@ -50,10 +50,6 @@ type Pod struct { Status PodStatus } -func (p *Pod) resourceVersion() string { - return p.Metadata.ResourceVersion -} - // PodSpec implements k8s pod spec. // // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#podspec-v1-core diff --git a/lib/promscrape/discovery/kubernetes/service.go b/lib/promscrape/discovery/kubernetes/service.go index fe209281e..c129816e6 100644 --- a/lib/promscrape/discovery/kubernetes/service.go +++ b/lib/promscrape/discovery/kubernetes/service.go @@ -47,10 +47,6 @@ type Service struct { Spec ServiceSpec } -func (s *Service) resourceVersion() string { - return s.Metadata.ResourceVersion -} - // ServiceSpec is k8s service spec. 
// // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#servicespec-v1-core From c0ac740f9316d6185cb8794b8deebee3f9de7d5c Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Thu, 11 Mar 2021 13:43:36 +0200 Subject: [PATCH 23/59] lib/proxy: do not show inline basic auth passwords when logging errors related to `proxy_url` --- lib/proxy/proxy.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/proxy/proxy.go b/lib/proxy/proxy.go index 0bb6f9e8d..8915ba2e1 100644 --- a/lib/proxy/proxy.go +++ b/lib/proxy/proxy.go @@ -57,7 +57,7 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) { } pu := u.url if pu.Scheme != "http" && pu.Scheme != "https" { - return nil, fmt.Errorf("unknown scheme=%q for proxy_url=%q, must be http or https", pu.Scheme, pu) + return nil, fmt.Errorf("unknown scheme=%q for proxy_url=%q, must be http or https", pu.Scheme, pu.Redacted()) } isTLS := pu.Scheme == "https" proxyAddr := addMissingPort(pu.Host, isTLS) @@ -76,7 +76,7 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) { dialFunc := func(addr string) (net.Conn, error) { proxyConn, err := defaultDialFunc(proxyAddr) if err != nil { - return nil, fmt.Errorf("cannot connect to proxy %q: %w", pu, err) + return nil, fmt.Errorf("cannot connect to proxy %q: %w", pu.Redacted(), err) } if isTLS { tlsCfgLocal := tlsCfg @@ -89,7 +89,7 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) { conn, err := sendConnectRequest(proxyConn, proxyAddr, addr, authHeader) if err != nil { _ = proxyConn.Close() - return nil, fmt.Errorf("error when sending CONNECT request to proxy %q: %w", pu, err) + return nil, fmt.Errorf("error when sending CONNECT request to proxy %q: %w", pu.Redacted(), err) } return conn, nil } From dc0cb54d411cc3877664830a177f0a5966d86830 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Thu, 11 Mar 2021 16:39:03 +0200 Subject: [PATCH 24/59] deployment/docker: update Go builder from 1.16.0 to 1.16.1 See https://github.com/golang/go/issues?q=milestone%3AGo1.16.1+label%3ACherryPickApproved --- deployment/docker/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/docker/Makefile b/deployment/docker/Makefile index ccdd75c03..8b760435f 100644 --- a/deployment/docker/Makefile +++ b/deployment/docker/Makefile @@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics ROOT_IMAGE ?= alpine:3.13.2 CERTS_IMAGE := alpine:3.13.2 -GO_BUILDER_IMAGE := golang:1.16.0 +GO_BUILDER_IMAGE := golang:1.16.1 BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr : _) BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr : _)-$(shell echo $(CERTS_IMAGE) | tr : _) From 133b288681978d82cd3a4c6125824346b9344f9f Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Thu, 11 Mar 2021 16:41:09 +0200 Subject: [PATCH 25/59] lib/promscrape/discovery/kubernetes: use a single watcher per apiURL Previously multiple scrape jobs could create multiple watchers for the same apiURL. Now only a single watcher is used. This should reduce load on Kubernetes API server when many scrape job configs use Kubernetes service discovery. 
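The heart of this change is a process-wide get-or-create registry: urlWatcher instances live in a map keyed by the API URL (the real key also folds in the proxy URL and the auth config), and each apiWatcher merely attaches to the shared instance as a subscriber. The sketch below shows the pattern in isolation; the watcher, getWatcher and subscribe names are simplified stand-ins for the types in the diff, not the actual code.

package main

import (
	"fmt"
	"sync"
)

// watcher stands in for urlWatcher: exactly one instance exists per registry key,
// no matter how many scrape jobs discover targets through the same API URL.
type watcher struct {
	apiURL string

	mu          sync.Mutex
	subscribers map[string]struct{} // stands in for the set of attached apiWatchers
}

func (w *watcher) subscribe(id string) {
	w.mu.Lock()
	w.subscribers[id] = struct{}{}
	w.mu.Unlock()
}

var (
	registryLock sync.Mutex
	registry     = make(map[string]*watcher)
)

// getWatcher returns the shared watcher for key, creating it on first use.
func getWatcher(key string) *watcher {
	registryLock.Lock()
	defer registryLock.Unlock()
	w := registry[key]
	if w == nil {
		w = &watcher{apiURL: key, subscribers: make(map[string]struct{})}
		registry[key] = w
	}
	return w
}

func main() {
	a := getWatcher("https://kubernetes.default/api/v1/pods")
	a.subscribe("job1")
	b := getWatcher("https://kubernetes.default/api/v1/pods")
	b.subscribe("job2")
	fmt.Println("single shared watcher:", a == b) // true: one API connection serves both jobs
}

This is also why the HTTP client construction moves out of api.go in the diff below: the client now belongs to the shared urlWatcher rather than to each individual apiWatcher.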
--- lib/promscrape/discovery/kubernetes/api.go | 26 +- .../discovery/kubernetes/api_watcher.go | 712 +++++++++--------- .../discovery/kubernetes/endpoints.go | 8 +- .../discovery/kubernetes/endpoints_test.go | 7 +- .../discovery/kubernetes/endpointslices.go | 8 +- .../kubernetes/endpointslices_test.go | 7 +- .../discovery/kubernetes/ingress.go | 8 +- .../discovery/kubernetes/ingress_test.go | 7 +- .../discovery/kubernetes/kubernetes.go | 7 +- lib/promscrape/discovery/kubernetes/node.go | 8 +- .../discovery/kubernetes/node_test.go | 7 +- lib/promscrape/discovery/kubernetes/pod.go | 8 +- .../discovery/kubernetes/pod_test.go | 7 +- .../discovery/kubernetes/service.go | 8 +- .../discovery/kubernetes/service_test.go | 7 +- 15 files changed, 411 insertions(+), 424 deletions(-) diff --git a/lib/promscrape/discovery/kubernetes/api.go b/lib/promscrape/discovery/kubernetes/api.go index cd20d89ee..5596c7f5b 100644 --- a/lib/promscrape/discovery/kubernetes/api.go +++ b/lib/promscrape/discovery/kubernetes/api.go @@ -1,21 +1,15 @@ package kubernetes import ( - "flag" "fmt" "net" - "net/http" - "net/url" "os" "strings" - "time" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils" ) -var apiServerTimeout = flag.Duration("promscrape.kubernetes.apiServerTimeout", 30*time.Minute, "How frequently to reload the full state from Kuberntes API server") - // apiConfig contains config for API server type apiConfig struct { aw *apiWatcher @@ -36,6 +30,11 @@ func getAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFu } func newAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFunc) (*apiConfig, error) { + switch sdc.Role { + case "node", "pod", "service", "endpoints", "endpointslices", "ingress": + default: + return nil, fmt.Errorf("unexpected `role`: %q; must be one of `node`, `pod`, `service`, `endpoints`, `endpointslices` or `ingress`", sdc.Role) + } ac, err := promauth.NewConfig(baseDir, sdc.BasicAuth, sdc.BearerToken, sdc.BearerTokenFile, sdc.TLSConfig) if err != nil { return nil, fmt.Errorf("cannot parse auth config: %w", err) @@ -75,20 +74,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFu for strings.HasSuffix(apiServer, "/") { apiServer = apiServer[:len(apiServer)-1] } - var proxy func(*http.Request) (*url.URL, error) - if proxyURL := sdc.ProxyURL.URL(); proxyURL != nil { - proxy = http.ProxyURL(proxyURL) - } - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: ac.NewTLSConfig(), - Proxy: proxy, - TLSHandshakeTimeout: 10 * time.Second, - IdleConnTimeout: *apiServerTimeout, - }, - Timeout: *apiServerTimeout, - } - aw := newAPIWatcher(client, apiServer, ac.Authorization, sdc.Namespaces.Names, sdc.Selectors, swcFunc) + aw := newAPIWatcher(apiServer, ac, sdc, swcFunc) cfg := &apiConfig{ aw: aw, } diff --git a/lib/promscrape/discovery/kubernetes/api_watcher.go b/lib/promscrape/discovery/kubernetes/api_watcher.go index 5d4a30273..26e407690 100644 --- a/lib/promscrape/discovery/kubernetes/api_watcher.go +++ b/lib/promscrape/discovery/kubernetes/api_watcher.go @@ -1,9 +1,9 @@ package kubernetes import ( - "context" "encoding/json" "errors" + "flag" "fmt" "io" "io/ioutil" @@ -16,9 +16,12 @@ import ( "time" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth" "github.com/VictoriaMetrics/metrics" ) +var apiServerTimeout = flag.Duration("promscrape.kubernetes.apiServerTimeout", 
30*time.Minute, "How frequently to reload the full state from Kuberntes API server") + // WatchEvent is a watch event returned from API server endpoints if `watch=1` query arg is set. // // See https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes @@ -36,275 +39,83 @@ type object interface { // parseObjectFunc must parse object from the given data. type parseObjectFunc func(data []byte) (object, error) -// parseObjectListFunc must parse objectList from the given data. -type parseObjectListFunc func(data []byte) (map[string]object, ListMeta, error) +// parseObjectListFunc must parse objectList from the given r. +type parseObjectListFunc func(r io.Reader) (map[string]object, ListMeta, error) // apiWatcher is used for watching for Kuberntes object changes and caching their latest states. type apiWatcher struct { - // The client used for watching for object changes - client *http.Client - // Kubenetes API server address in the form http://api-server apiServer string - // The contents for `Authorization` HTTP request header - authorization string + // ac contains auth config for communicating with apiServer + ac *promauth.Config - // Namespaces to watch - namespaces []string + // sdc contains the related SDConfig + sdc *SDConfig - // Selectors to apply during watch - selectors []Selector - - // Constructor for creating ScrapeWork objects from labels. + // Constructor for creating ScrapeWork objects from labels swcFunc ScrapeWorkConstructorFunc - // mu protects watchersByURL - mu sync.Mutex + // swos contains a map of ScrapeWork objects for the given apiWatcher + swosByKey map[string][]interface{} + swosByKeyLock sync.Mutex // a map of watchers keyed by request urls - watchersByURL map[string]*urlWatcher + watchersByURL map[string]*urlWatcher + watchersByURLLock sync.Mutex - stopFunc func() - stopCtx context.Context - wg sync.WaitGroup + stopCh chan struct{} + wg sync.WaitGroup +} + +func newAPIWatcher(apiServer string, ac *promauth.Config, sdc *SDConfig, swcFunc ScrapeWorkConstructorFunc) *apiWatcher { + return &apiWatcher{ + apiServer: apiServer, + ac: ac, + sdc: sdc, + swcFunc: swcFunc, + + swosByKey: make(map[string][]interface{}), + watchersByURL: make(map[string]*urlWatcher), + + stopCh: make(chan struct{}), + } } func (aw *apiWatcher) mustStop() { - aw.stopFunc() + close(aw.stopCh) aw.wg.Wait() } -func newAPIWatcher(client *http.Client, apiServer, authorization string, namespaces []string, selectors []Selector, swcFunc ScrapeWorkConstructorFunc) *apiWatcher { - stopCtx, stopFunc := context.WithCancel(context.Background()) - return &apiWatcher{ - apiServer: apiServer, - authorization: authorization, - client: client, - namespaces: namespaces, - selectors: selectors, - swcFunc: swcFunc, - - watchersByURL: make(map[string]*urlWatcher), - - stopFunc: stopFunc, - stopCtx: stopCtx, - } -} - -// getScrapeWorkObjectsForRole returns all the ScrapeWork objects for the given role. -func (aw *apiWatcher) getScrapeWorkObjectsForRole(role string) []interface{} { - aw.startWatchersForRole(role) - var swos []interface{} - aw.mu.Lock() - for _, uw := range aw.watchersByURL { - if uw.role != role { - continue - } - uw.mu.Lock() - for _, swosLocal := range uw.swosByKey { - swos = append(swos, swosLocal...) - } - uw.mu.Unlock() - } - aw.mu.Unlock() - return swos -} - -// getObjectByRole returns an object with the given (namespace, name) key and the given role. 
-func (aw *apiWatcher) getObjectByRole(role, namespace, name string) object { - if aw == nil { - return nil - } - key := namespace + "/" + name - aw.startWatchersForRole(role) - var o object - aw.mu.Lock() - for _, uw := range aw.watchersByURL { - if uw.role != role { - continue - } - o = uw.objectsByKey.get(key) - if o != nil { - break - } - } - aw.mu.Unlock() - return o -} - -func (aw *apiWatcher) startWatchersForRole(role string) { - parseObject, parseObjectList := getObjectParsersForRole(role) - paths := getAPIPaths(role, aw.namespaces, aw.selectors) - for _, path := range paths { - apiURL := aw.apiServer + path - aw.startWatcherForURL(role, apiURL, parseObject, parseObjectList) - } -} - -func (aw *apiWatcher) startWatcherForURL(role, apiURL string, parseObject parseObjectFunc, parseObjectList parseObjectListFunc) { - aw.mu.Lock() - if aw.watchersByURL[apiURL] != nil { - // Watcher for the given path already exists. - aw.mu.Unlock() - return - } - uw := aw.newURLWatcher(role, apiURL, parseObject, parseObjectList) - aw.watchersByURL[apiURL] = uw - aw.mu.Unlock() - - uw.watchersCount.Inc() - uw.watchersCreated.Inc() - uw.reloadObjects() - aw.wg.Add(1) - go func() { - defer aw.wg.Done() - uw.watchForUpdates() - uw.objectsByKey.decRef() - - aw.mu.Lock() - delete(aw.watchersByURL, apiURL) - aw.mu.Unlock() - uw.watchersCount.Dec() - uw.watchersStopped.Inc() - }() -} - -// needStop returns true if aw must be stopped. -func (aw *apiWatcher) needStop() bool { - select { - case <-aw.stopCtx.Done(): - return true - default: - return false - } -} - -// doRequest performs http request to the given requestURL. -func (aw *apiWatcher) doRequest(requestURL string) (*http.Response, error) { - req, err := http.NewRequestWithContext(aw.stopCtx, "GET", requestURL, nil) - if err != nil { - logger.Fatalf("cannot create a request for %q: %s", requestURL, err) - } - if aw.authorization != "" { - req.Header.Set("Authorization", aw.authorization) - } - return aw.client.Do(req) -} - -// urlWatcher watches for an apiURL and updates object states in objectsByKey. -type urlWatcher struct { - role string - apiURL string - - parseObject parseObjectFunc - parseObjectList parseObjectListFunc - - // objectsByKey contains the latest state for objects obtained from apiURL - objectsByKey *objectsMap - - // mu protects swosByKey and resourceVersion - mu sync.Mutex - swosByKey map[string][]interface{} - resourceVersion string - - // the parent apiWatcher - aw *apiWatcher - - watchersCount *metrics.Counter - watchersCreated *metrics.Counter - watchersStopped *metrics.Counter -} - -func (aw *apiWatcher) newURLWatcher(role, apiURL string, parseObject parseObjectFunc, parseObjectList parseObjectListFunc) *urlWatcher { - return &urlWatcher{ - role: role, - apiURL: apiURL, - - parseObject: parseObject, - parseObjectList: parseObjectList, - - objectsByKey: sharedObjectsGlobal.getByAPIURL(role, apiURL), - swosByKey: make(map[string][]interface{}), - - aw: aw, - - watchersCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_url_watchers{role=%q}`, role)), - watchersCreated: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_url_watchers_created_total{role=%q}`, role)), - watchersStopped: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_url_watchers_stopped_total{role=%q}`, role)), - } -} - -// Limit the concurrency for per-role objects reloading to 1. 
-// -// This should reduce memory usage when big number of watchers simultaneously receive an update for objects of the same role. -var reloadObjectsLocksByRole = map[string]*sync.Mutex{ - "node": {}, - "pod": {}, - "service": {}, - "endpoints": {}, - "endpointslices": {}, - "ingress": {}, -} - -func (uw *urlWatcher) setResourceVersion(resourceVersion string) { - uw.mu.Lock() - uw.resourceVersion = resourceVersion - uw.mu.Unlock() -} - -// reloadObjects reloads objects to the latest state and returns resourceVersion for the latest state. -func (uw *urlWatcher) reloadObjects() string { - lock := reloadObjectsLocksByRole[uw.role] - lock.Lock() - defer lock.Unlock() - - uw.mu.Lock() - resourceVersion := uw.resourceVersion - uw.mu.Unlock() - if resourceVersion != "" { - // Fast path - objects have been already reloaded by concurrent goroutines. - return resourceVersion - } - - aw := uw.aw - requestURL := uw.apiURL - resp, err := aw.doRequest(requestURL) - if err != nil { - if !aw.needStop() { - logger.Errorf("error when performing a request to %q: %s", requestURL, err) - } - return "" - } - body, _ := ioutil.ReadAll(resp.Body) - _ = resp.Body.Close() - if resp.StatusCode != http.StatusOK { - logger.Errorf("unexpected status code for request to %q: %d; want %d; response: %q", requestURL, resp.StatusCode, http.StatusOK, body) - return "" - } - objectsByKey, metadata, err := uw.parseObjectList(body) - if err != nil { - if !aw.needStop() { - logger.Errorf("cannot parse response from %q: %s", requestURL, err) - } - return "" - } - logger.Infof("loaded %d objects from %q", len(objectsByKey), requestURL) - uw.objectsByKey.reload(objectsByKey) +func (aw *apiWatcher) reloadScrapeWorks(objectsByKey map[string]object) { swosByKey := make(map[string][]interface{}) - for k, o := range objectsByKey { + for key, o := range objectsByKey { labels := o.getTargetLabels(aw) swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels) if len(swos) > 0 { - swosByKey[k] = swos + swosByKey[key] = swos } } - uw.mu.Lock() - uw.swosByKey = swosByKey - uw.resourceVersion = metadata.ResourceVersion - uw.mu.Unlock() + aw.swosByKeyLock.Lock() + aw.swosByKey = swosByKey + aw.swosByKeyLock.Unlock() +} - return metadata.ResourceVersion +func (aw *apiWatcher) setScrapeWorks(key string, labels []map[string]string) { + swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels) + aw.swosByKeyLock.Lock() + if len(swos) > 0 { + aw.swosByKey[key] = swos + } else { + delete(aw.swosByKey, key) + } + aw.swosByKeyLock.Unlock() +} + +func (aw *apiWatcher) removeScrapeWorks(key string) { + aw.swosByKeyLock.Lock() + delete(aw.swosByKey, key) + aw.swosByKeyLock.Unlock() } func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss []map[string]string) []interface{} { @@ -319,11 +130,279 @@ func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss [] return swos } +// getScrapeWorkObjects returns all the ScrapeWork objects for the given aw. +func (aw *apiWatcher) getScrapeWorkObjects() []interface{} { + aw.startWatchersForRole(aw.sdc.Role) + aw.swosByKeyLock.Lock() + defer aw.swosByKeyLock.Unlock() + + size := 0 + for _, swosLocal := range aw.swosByKey { + size += len(swosLocal) + } + swos := make([]interface{}, 0, size) + for _, swosLocal := range aw.swosByKey { + swos = append(swos, swosLocal...) + } + return swos +} + +// getObjectByRole returns an object with the given (namespace, name) key and the given role. 
+func (aw *apiWatcher) getObjectByRole(role, namespace, name string) object { + if aw == nil { + // this is needed for testing + return nil + } + key := namespace + "/" + name + aw.startWatchersForRole(role) + aw.watchersByURLLock.Lock() + defer aw.watchersByURLLock.Unlock() + + for _, uw := range aw.watchersByURL { + if uw.role != role { + continue + } + uw.mu.Lock() + o := uw.objectsByKey[key] + uw.mu.Unlock() + if o != nil { + return o + } + } + return nil +} + +func (aw *apiWatcher) startWatchersForRole(role string) { + paths := getAPIPaths(role, aw.sdc.Namespaces.Names, aw.sdc.Selectors) + for _, path := range paths { + apiURL := aw.apiServer + path + aw.startWatcherForURL(role, apiURL) + } +} + +func (aw *apiWatcher) startWatcherForURL(role, apiURL string) { + aw.watchersByURLLock.Lock() + if aw.watchersByURL[apiURL] != nil { + // Watcher for the given path already exists. + aw.watchersByURLLock.Unlock() + return + } + uw := getURLWatcher(role, apiURL, aw.sdc.ProxyURL.URL(), aw.ac) + uw.addAPIWatcher(aw) + aw.watchersByURL[apiURL] = uw + aw.watchersByURLLock.Unlock() + + aw.wg.Add(1) + go func() { + defer aw.wg.Done() + <-aw.stopCh + aw.watchersByURLLock.Lock() + uw.removeAPIWatcher(aw) + delete(aw.watchersByURL, apiURL) + aw.watchersByURLLock.Unlock() + }() +} + +func getURLWatcher(role, apiURL string, proxyURL *url.URL, ac *promauth.Config) *urlWatcher { + key := fmt.Sprintf("url=%s, proxyURL=%v, authConfig=%s", apiURL, proxyURL, ac.String()) + urlWatchersLock.Lock() + uw := urlWatchers[key] + logger.Infof("found watcher for key=%s", key) + if uw == nil { + uw = newURLWatcher(role, apiURL, proxyURL, ac) + urlWatchers[key] = uw + logger.Infof("registered watcher for key=%s", key) + } + urlWatchersLock.Unlock() + return uw +} + +var ( + urlWatchersLock sync.Mutex + urlWatchers = make(map[string]*urlWatcher) +) + +// urlWatcher watches for an apiURL and updates object states in objectsByKey. 
+type urlWatcher struct { + role string + apiURL string + authorization string + client *http.Client + + parseObject parseObjectFunc + parseObjectList parseObjectListFunc + + // mu protects aws, objectsByKey and resourceVersion + mu sync.Mutex + + // aws contains registered apiWatcher objects + aws map[*apiWatcher]struct{} + + // objectsByKey contains the latest state for objects obtained from apiURL + objectsByKey map[string]object + + resourceVersion string + + objectsCount *metrics.Counter + objectsAdded *metrics.Counter + objectsRemoved *metrics.Counter + objectsUpdated *metrics.Counter +} + +func newURLWatcher(role, apiURL string, proxyURL *url.URL, ac *promauth.Config) *urlWatcher { + var proxy func(*http.Request) (*url.URL, error) + if proxyURL != nil { + proxy = http.ProxyURL(proxyURL) + } + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: ac.NewTLSConfig(), + Proxy: proxy, + TLSHandshakeTimeout: 10 * time.Second, + IdleConnTimeout: *apiServerTimeout, + }, + Timeout: *apiServerTimeout, + } + parseObject, parseObjectList := getObjectParsersForRole(role) + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_url_watchers{role=%q}`, role)).Inc() + uw := &urlWatcher{ + role: role, + apiURL: apiURL, + authorization: ac.Authorization, + client: client, + + parseObject: parseObject, + parseObjectList: parseObjectList, + + aws: make(map[*apiWatcher]struct{}), + objectsByKey: make(map[string]object), + + objectsCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects{role=%q}`, role)), + objectsAdded: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects_added_total{role=%q}`, role)), + objectsRemoved: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects_removed_total{role=%q}`, role)), + objectsUpdated: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects_updated_total{role=%q}`, role)), + } + uw.reloadObjects() + go uw.watchForUpdates() + return uw +} + +func (uw *urlWatcher) addAPIWatcher(aw *apiWatcher) { + uw.mu.Lock() + if _, ok := uw.aws[aw]; ok { + logger.Panicf("BUG: aw=%p has been already added", aw) + } + uw.aws[aw] = struct{}{} + objectsByKey := make(map[string]object) + for key, o := range uw.objectsByKey { + objectsByKey[key] = o + } + uw.mu.Unlock() + + aw.reloadScrapeWorks(objectsByKey) +} + +func (uw *urlWatcher) removeAPIWatcher(aw *apiWatcher) { + uw.mu.Lock() + if _, ok := uw.aws[aw]; !ok { + logger.Panicf("BUG: aw=%p is missing", aw) + } + delete(uw.aws, aw) + uw.mu.Unlock() +} + +// doRequest performs http request to the given requestURL. +func (uw *urlWatcher) doRequest(requestURL string) (*http.Response, error) { + req, err := http.NewRequest("GET", requestURL, nil) + if err != nil { + logger.Fatalf("cannot create a request for %q: %s", requestURL, err) + } + if uw.authorization != "" { + req.Header.Set("Authorization", uw.authorization) + } + return uw.client.Do(req) +} + +func (uw *urlWatcher) setResourceVersion(resourceVersion string) { + uw.mu.Lock() + uw.resourceVersion = resourceVersion + uw.mu.Unlock() +} + +// reloadObjects reloads objects to the latest state and returns resourceVersion for the latest state. +func (uw *urlWatcher) reloadObjects() string { + uw.mu.Lock() + resourceVersion := uw.resourceVersion + uw.mu.Unlock() + if resourceVersion != "" { + // Fast path - there is no need in reloading the objects. 
+ return resourceVersion + } + + requestURL := uw.apiURL + resp, err := uw.doRequest(requestURL) + if err != nil { + logger.Errorf("cannot perform request to %q: %s", requestURL, err) + return "" + } + if resp.StatusCode != http.StatusOK { + body, _ := ioutil.ReadAll(resp.Body) + _ = resp.Body.Close() + logger.Errorf("unexpected status code for request to %q: %d; want %d; response: %q", requestURL, resp.StatusCode, http.StatusOK, body) + return "" + } + objectsByKey, metadata, err := uw.parseObjectList(resp.Body) + _ = resp.Body.Close() + if err != nil { + logger.Errorf("cannot parse objects from %q: %s", requestURL, err) + return "" + } + + uw.mu.Lock() + var updated, removed, added int + for key := range uw.objectsByKey { + if o, ok := objectsByKey[key]; ok { + uw.objectsByKey[key] = o + updated++ + } else { + delete(uw.objectsByKey, key) + removed++ + } + } + for key, o := range objectsByKey { + if _, ok := uw.objectsByKey[key]; !ok { + uw.objectsByKey[key] = o + added++ + } + } + uw.objectsUpdated.Add(updated) + uw.objectsRemoved.Add(removed) + uw.objectsAdded.Add(added) + uw.objectsCount.Add(added - removed) + uw.resourceVersion = metadata.ResourceVersion + uw.mu.Unlock() + + for _, aw := range uw.getAPIWatchers() { + aw.reloadScrapeWorks(objectsByKey) + } + logger.Infof("loaded %d objects from %q", len(objectsByKey), requestURL) + return metadata.ResourceVersion +} + +func (uw *urlWatcher) getAPIWatchers() []*apiWatcher { + uw.mu.Lock() + aws := make([]*apiWatcher, 0, len(uw.aws)) + for aw := range uw.aws { + aws = append(aws, aw) + } + uw.mu.Unlock() + return aws +} + // watchForUpdates watches for object updates starting from uw.resourceVersion and updates the corresponding objects to the latest state. // // See https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes func (uw *urlWatcher) watchForUpdates() { - aw := uw.aw backoffDelay := time.Second maxBackoffDelay := 30 * time.Second backoffSleep := func() { @@ -338,23 +417,17 @@ func (uw *urlWatcher) watchForUpdates() { if strings.Contains(apiURL, "?") { delimiter = "&" } - timeoutSeconds := time.Duration(0.9 * float64(aw.client.Timeout)).Seconds() + timeoutSeconds := time.Duration(0.9 * float64(uw.client.Timeout)).Seconds() apiURL += delimiter + "watch=1&allowWatchBookmarks=true&timeoutSeconds=" + strconv.Itoa(int(timeoutSeconds)) for { - if aw.needStop() { - return - } resourceVersion := uw.reloadObjects() requestURL := apiURL if resourceVersion != "" { requestURL += "&resourceVersion=" + url.QueryEscape(resourceVersion) } - resp, err := aw.doRequest(requestURL) + resp, err := uw.doRequest(requestURL) if err != nil { - if aw.needStop() { - return - } - logger.Errorf("error when performing a request to %q: %s", requestURL, err) + logger.Errorf("cannot performing request to %q: %s", requestURL, err) backoffSleep() continue } @@ -375,9 +448,6 @@ func (uw *urlWatcher) watchForUpdates() { err = uw.readObjectUpdateStream(resp.Body) _ = resp.Body.Close() if err != nil { - if aw.needStop() { - return - } if !errors.Is(err, io.EOF) { logger.Errorf("error when reading WatchEvent stream from %q: %s", requestURL, err) } @@ -389,7 +459,6 @@ func (uw *urlWatcher) watchForUpdates() { // readObjectUpdateStream reads Kuberntes watch events from r and updates locally cached objects according to the received events. 
func (uw *urlWatcher) readObjectUpdateStream(r io.Reader) error { - aw := uw.aw d := json.NewDecoder(r) var we WatchEvent for { @@ -403,26 +472,35 @@ func (uw *urlWatcher) readObjectUpdateStream(r io.Reader) error { return err } key := o.key() - uw.objectsByKey.update(key, o) - labels := o.getTargetLabels(aw) - swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels) uw.mu.Lock() - if len(swos) > 0 { - uw.swosByKey[key] = swos + if _, ok := uw.objectsByKey[key]; !ok { + uw.objectsCount.Inc() + uw.objectsAdded.Inc() } else { - delete(uw.swosByKey, key) + uw.objectsUpdated.Inc() } + uw.objectsByKey[key] = o uw.mu.Unlock() + for _, aw := range uw.getAPIWatchers() { + labels := o.getTargetLabels(aw) + aw.setScrapeWorks(key, labels) + } case "DELETED": o, err := uw.parseObject(we.Object) if err != nil { return err } key := o.key() - uw.objectsByKey.remove(key) uw.mu.Lock() - delete(uw.swosByKey, key) + if _, ok := uw.objectsByKey[key]; ok { + uw.objectsCount.Dec() + uw.objectsRemoved.Inc() + delete(uw.objectsByKey, key) + } uw.mu.Unlock() + for _, aw := range uw.getAPIWatchers() { + aw.removeScrapeWorks(key) + } case "BOOKMARK": // See https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks bm, err := parseBookmark(we.Object) @@ -546,105 +624,3 @@ func getObjectParsersForRole(role string) (parseObjectFunc, parseObjectListFunc) return nil, nil } } - -type objectsMap struct { - mu sync.Mutex - refCount int - m map[string]object - - objectsAdded *metrics.Counter - objectsRemoved *metrics.Counter - objectsCount *metrics.Counter -} - -func (om *objectsMap) incRef() { - om.mu.Lock() - om.refCount++ - om.mu.Unlock() -} - -func (om *objectsMap) decRef() { - om.mu.Lock() - om.refCount-- - if om.refCount < 0 { - logger.Panicf("BUG: refCount cannot be smaller than 0; got %d", om.refCount) - } - if om.refCount == 0 { - // Free up memory occupied by om.m - om.objectsRemoved.Add(len(om.m)) - om.objectsCount.Add(-len(om.m)) - om.m = make(map[string]object) - } - om.mu.Unlock() -} - -func (om *objectsMap) reload(m map[string]object) { - om.mu.Lock() - om.objectsAdded.Add(len(m)) - om.objectsRemoved.Add(len(om.m)) - om.objectsCount.Add(len(m) - len(om.m)) - for k := range om.m { - delete(om.m, k) - } - for k, o := range m { - om.m[k] = o - } - om.mu.Unlock() -} - -func (om *objectsMap) update(key string, o object) { - om.mu.Lock() - if om.m[key] == nil { - om.objectsAdded.Inc() - om.objectsCount.Inc() - } - om.m[key] = o - om.mu.Unlock() -} - -func (om *objectsMap) remove(key string) { - om.mu.Lock() - if om.m[key] != nil { - om.objectsRemoved.Inc() - om.objectsCount.Dec() - delete(om.m, key) - } - om.mu.Unlock() -} - -func (om *objectsMap) get(key string) object { - om.mu.Lock() - o, ok := om.m[key] - om.mu.Unlock() - if !ok { - return nil - } - return o -} - -type sharedObjects struct { - mu sync.Mutex - oms map[string]*objectsMap -} - -func (so *sharedObjects) getByAPIURL(role, apiURL string) *objectsMap { - so.mu.Lock() - om := so.oms[apiURL] - if om == nil { - om = &objectsMap{ - m: make(map[string]object), - - objectsCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects{role=%q}`, role)), - objectsAdded: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects_added_total{role=%q}`, role)), - objectsRemoved: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects_removed_total{role=%q}`, role)), - } - so.oms[apiURL] = om - } - so.mu.Unlock() - om.incRef() - return om -} - -var sharedObjectsGlobal 
= &sharedObjects{ - oms: make(map[string]*objectsMap), -} diff --git a/lib/promscrape/discovery/kubernetes/endpoints.go b/lib/promscrape/discovery/kubernetes/endpoints.go index 003cceca5..5119f513a 100644 --- a/lib/promscrape/discovery/kubernetes/endpoints.go +++ b/lib/promscrape/discovery/kubernetes/endpoints.go @@ -3,6 +3,7 @@ package kubernetes import ( "encoding/json" "fmt" + "io" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils" ) @@ -11,10 +12,11 @@ func (eps *Endpoints) key() string { return eps.Metadata.key() } -func parseEndpointsList(data []byte) (map[string]object, ListMeta, error) { +func parseEndpointsList(r io.Reader) (map[string]object, ListMeta, error) { var epsl EndpointsList - if err := json.Unmarshal(data, &epsl); err != nil { - return nil, epsl.Metadata, fmt.Errorf("cannot unmarshal EndpointsList from %q: %w", data, err) + d := json.NewDecoder(r) + if err := d.Decode(&epsl); err != nil { + return nil, epsl.Metadata, fmt.Errorf("cannot unmarshal EndpointsList: %w", err) } objectsByKey := make(map[string]object) for _, eps := range epsl.Items { diff --git a/lib/promscrape/discovery/kubernetes/endpoints_test.go b/lib/promscrape/discovery/kubernetes/endpoints_test.go index c9649df92..51b30d091 100644 --- a/lib/promscrape/discovery/kubernetes/endpoints_test.go +++ b/lib/promscrape/discovery/kubernetes/endpoints_test.go @@ -1,6 +1,7 @@ package kubernetes import ( + "bytes" "testing" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" @@ -10,7 +11,8 @@ import ( func TestParseEndpointsListFailure(t *testing.T) { f := func(s string) { t.Helper() - objectsByKey, _, err := parseEndpointsList([]byte(s)) + r := bytes.NewBufferString(s) + objectsByKey, _, err := parseEndpointsList(r) if err == nil { t.Fatalf("expecting non-nil error") } @@ -78,7 +80,8 @@ func TestParseEndpointsListSuccess(t *testing.T) { ] } ` - objectsByKey, meta, err := parseEndpointsList([]byte(data)) + r := bytes.NewBufferString(data) + objectsByKey, meta, err := parseEndpointsList(r) if err != nil { t.Fatalf("unexpected error: %s", err) } diff --git a/lib/promscrape/discovery/kubernetes/endpointslices.go b/lib/promscrape/discovery/kubernetes/endpointslices.go index cec87ce13..a8739ee5b 100644 --- a/lib/promscrape/discovery/kubernetes/endpointslices.go +++ b/lib/promscrape/discovery/kubernetes/endpointslices.go @@ -3,6 +3,7 @@ package kubernetes import ( "encoding/json" "fmt" + "io" "strconv" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils" @@ -12,10 +13,11 @@ func (eps *EndpointSlice) key() string { return eps.Metadata.key() } -func parseEndpointSliceList(data []byte) (map[string]object, ListMeta, error) { +func parseEndpointSliceList(r io.Reader) (map[string]object, ListMeta, error) { var epsl EndpointSliceList - if err := json.Unmarshal(data, &epsl); err != nil { - return nil, epsl.Metadata, fmt.Errorf("cannot unmarshal EndpointSliceList from %q: %w", data, err) + d := json.NewDecoder(r) + if err := d.Decode(&epsl); err != nil { + return nil, epsl.Metadata, fmt.Errorf("cannot unmarshal EndpointSliceList: %w", err) } objectsByKey := make(map[string]object) for _, eps := range epsl.Items { diff --git a/lib/promscrape/discovery/kubernetes/endpointslices_test.go b/lib/promscrape/discovery/kubernetes/endpointslices_test.go index 3555e42f2..4114e0cff 100644 --- a/lib/promscrape/discovery/kubernetes/endpointslices_test.go +++ b/lib/promscrape/discovery/kubernetes/endpointslices_test.go @@ -1,6 +1,7 @@ package kubernetes import ( + "bytes" "testing" 
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" @@ -9,7 +10,8 @@ import ( func TestParseEndpointSliceListFail(t *testing.T) { f := func(data string) { - objectsByKey, _, err := parseEndpointSliceList([]byte(data)) + r := bytes.NewBufferString(data) + objectsByKey, _, err := parseEndpointSliceList(r) if err == nil { t.Errorf("unexpected result, test must fail! data: %s", data) } @@ -175,7 +177,8 @@ func TestParseEndpointSliceListSuccess(t *testing.T) { } ] }` - objectsByKey, meta, err := parseEndpointSliceList([]byte(data)) + r := bytes.NewBufferString(data) + objectsByKey, meta, err := parseEndpointSliceList(r) if err != nil { t.Errorf("cannot parse data for EndpointSliceList: %v", err) return diff --git a/lib/promscrape/discovery/kubernetes/ingress.go b/lib/promscrape/discovery/kubernetes/ingress.go index 38aef79d3..d7f5df66f 100644 --- a/lib/promscrape/discovery/kubernetes/ingress.go +++ b/lib/promscrape/discovery/kubernetes/ingress.go @@ -3,16 +3,18 @@ package kubernetes import ( "encoding/json" "fmt" + "io" ) func (ig *Ingress) key() string { return ig.Metadata.key() } -func parseIngressList(data []byte) (map[string]object, ListMeta, error) { +func parseIngressList(r io.Reader) (map[string]object, ListMeta, error) { var igl IngressList - if err := json.Unmarshal(data, &igl); err != nil { - return nil, igl.Metadata, fmt.Errorf("cannot unmarshal IngressList from %q: %w", data, err) + d := json.NewDecoder(r) + if err := d.Decode(&igl); err != nil { + return nil, igl.Metadata, fmt.Errorf("cannot unmarshal IngressList: %w", err) } objectsByKey := make(map[string]object) for _, ig := range igl.Items { diff --git a/lib/promscrape/discovery/kubernetes/ingress_test.go b/lib/promscrape/discovery/kubernetes/ingress_test.go index 5718758f5..f3440ac23 100644 --- a/lib/promscrape/discovery/kubernetes/ingress_test.go +++ b/lib/promscrape/discovery/kubernetes/ingress_test.go @@ -1,6 +1,7 @@ package kubernetes import ( + "bytes" "testing" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" @@ -10,7 +11,8 @@ import ( func TestParseIngressListFailure(t *testing.T) { f := func(s string) { t.Helper() - objectsByKey, _, err := parseIngressList([]byte(s)) + r := bytes.NewBufferString(s) + objectsByKey, _, err := parseIngressList(r) if err == nil { t.Fatalf("expecting non-nil error") } @@ -70,7 +72,8 @@ func TestParseIngressListSuccess(t *testing.T) { } ] }` - objectsByKey, meta, err := parseIngressList([]byte(data)) + r := bytes.NewBufferString(data) + objectsByKey, meta, err := parseIngressList(r) if err != nil { t.Fatalf("unexpected error: %s", err) } diff --git a/lib/promscrape/discovery/kubernetes/kubernetes.go b/lib/promscrape/discovery/kubernetes/kubernetes.go index 2bab34550..b7dc29b64 100644 --- a/lib/promscrape/discovery/kubernetes/kubernetes.go +++ b/lib/promscrape/discovery/kubernetes/kubernetes.go @@ -48,12 +48,7 @@ func (sdc *SDConfig) GetScrapeWorkObjects(baseDir string, swcFunc ScrapeWorkCons if err != nil { return nil, fmt.Errorf("cannot create API config: %w", err) } - switch sdc.Role { - case "node", "pod", "service", "endpoints", "endpointslices", "ingress": - return cfg.aw.getScrapeWorkObjectsForRole(sdc.Role), nil - default: - return nil, fmt.Errorf("unexpected `role`: %q; must be one of `node`, `pod`, `service`, `endpoints`, `endpointslices` or `ingress`; skipping it", sdc.Role) - } + return cfg.aw.getScrapeWorkObjects(), nil } // MustStop stops further usage for sdc. 
diff --git a/lib/promscrape/discovery/kubernetes/node.go b/lib/promscrape/discovery/kubernetes/node.go index 653a99e73..a7e392d65 100644 --- a/lib/promscrape/discovery/kubernetes/node.go +++ b/lib/promscrape/discovery/kubernetes/node.go @@ -3,6 +3,7 @@ package kubernetes import ( "encoding/json" "fmt" + "io" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils" ) @@ -12,10 +13,11 @@ func (n *Node) key() string { return n.Metadata.key() } -func parseNodeList(data []byte) (map[string]object, ListMeta, error) { +func parseNodeList(r io.Reader) (map[string]object, ListMeta, error) { var nl NodeList - if err := json.Unmarshal(data, &nl); err != nil { - return nil, nl.Metadata, fmt.Errorf("cannot unmarshal NodeList from %q: %w", data, err) + d := json.NewDecoder(r) + if err := d.Decode(&nl); err != nil { + return nil, nl.Metadata, fmt.Errorf("cannot unmarshal NodeList: %w", err) } objectsByKey := make(map[string]object) for _, n := range nl.Items { diff --git a/lib/promscrape/discovery/kubernetes/node_test.go b/lib/promscrape/discovery/kubernetes/node_test.go index 5b957e2f3..1e63ee69a 100644 --- a/lib/promscrape/discovery/kubernetes/node_test.go +++ b/lib/promscrape/discovery/kubernetes/node_test.go @@ -1,6 +1,7 @@ package kubernetes import ( + "bytes" "reflect" "sort" "strconv" @@ -13,7 +14,8 @@ import ( func TestParseNodeListFailure(t *testing.T) { f := func(s string) { t.Helper() - objectsByKey, _, err := parseNodeList([]byte(s)) + r := bytes.NewBufferString(s) + objectsByKey, _, err := parseNodeList(r) if err == nil { t.Fatalf("expecting non-nil error") } @@ -229,7 +231,8 @@ func TestParseNodeListSuccess(t *testing.T) { ] } ` - objectsByKey, meta, err := parseNodeList([]byte(data)) + r := bytes.NewBufferString(data) + objectsByKey, meta, err := parseNodeList(r) if err != nil { t.Fatalf("unexpected error: %s", err) } diff --git a/lib/promscrape/discovery/kubernetes/pod.go b/lib/promscrape/discovery/kubernetes/pod.go index 80864d25e..c3e3f314a 100644 --- a/lib/promscrape/discovery/kubernetes/pod.go +++ b/lib/promscrape/discovery/kubernetes/pod.go @@ -3,6 +3,7 @@ package kubernetes import ( "encoding/json" "fmt" + "io" "strconv" "strings" @@ -13,10 +14,11 @@ func (p *Pod) key() string { return p.Metadata.key() } -func parsePodList(data []byte) (map[string]object, ListMeta, error) { +func parsePodList(r io.Reader) (map[string]object, ListMeta, error) { var pl PodList - if err := json.Unmarshal(data, &pl); err != nil { - return nil, pl.Metadata, fmt.Errorf("cannot unmarshal PodList from %q: %w", data, err) + d := json.NewDecoder(r) + if err := d.Decode(&pl); err != nil { + return nil, pl.Metadata, fmt.Errorf("cannot unmarshal PodList: %w", err) } objectsByKey := make(map[string]object) for _, p := range pl.Items { diff --git a/lib/promscrape/discovery/kubernetes/pod_test.go b/lib/promscrape/discovery/kubernetes/pod_test.go index 54ebdec0f..f697d1b84 100644 --- a/lib/promscrape/discovery/kubernetes/pod_test.go +++ b/lib/promscrape/discovery/kubernetes/pod_test.go @@ -1,6 +1,7 @@ package kubernetes import ( + "bytes" "testing" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" @@ -10,7 +11,8 @@ import ( func TestParsePodListFailure(t *testing.T) { f := func(s string) { t.Helper() - objectsByKey, _, err := parsePodList([]byte(s)) + r := bytes.NewBufferString(s) + objectsByKey, _, err := parsePodList(r) if err == nil { t.Fatalf("expecting non-nil error") } @@ -227,7 +229,8 @@ func TestParsePodListSuccess(t *testing.T) { ] } ` - objectsByKey, meta, err := 
parsePodList([]byte(data)) + r := bytes.NewBufferString(data) + objectsByKey, meta, err := parsePodList(r) if err != nil { t.Fatalf("unexpected error: %s", err) } diff --git a/lib/promscrape/discovery/kubernetes/service.go b/lib/promscrape/discovery/kubernetes/service.go index c129816e6..c5c0ddd8c 100644 --- a/lib/promscrape/discovery/kubernetes/service.go +++ b/lib/promscrape/discovery/kubernetes/service.go @@ -3,6 +3,7 @@ package kubernetes import ( "encoding/json" "fmt" + "io" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils" ) @@ -11,10 +12,11 @@ func (s *Service) key() string { return s.Metadata.key() } -func parseServiceList(data []byte) (map[string]object, ListMeta, error) { +func parseServiceList(r io.Reader) (map[string]object, ListMeta, error) { var sl ServiceList - if err := json.Unmarshal(data, &sl); err != nil { - return nil, sl.Metadata, fmt.Errorf("cannot unmarshal ServiceList from %q: %w", data, err) + d := json.NewDecoder(r) + if err := d.Decode(&sl); err != nil { + return nil, sl.Metadata, fmt.Errorf("cannot unmarshal ServiceList: %w", err) } objectsByKey := make(map[string]object) for _, s := range sl.Items { diff --git a/lib/promscrape/discovery/kubernetes/service_test.go b/lib/promscrape/discovery/kubernetes/service_test.go index ca1570ef7..050b2f70e 100644 --- a/lib/promscrape/discovery/kubernetes/service_test.go +++ b/lib/promscrape/discovery/kubernetes/service_test.go @@ -1,6 +1,7 @@ package kubernetes import ( + "bytes" "testing" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" @@ -10,7 +11,8 @@ import ( func TestParseServiceListFailure(t *testing.T) { f := func(s string) { t.Helper() - objectsByKey, _, err := parseServiceList([]byte(s)) + r := bytes.NewBufferString(s) + objectsByKey, _, err := parseServiceList(r) if err == nil { t.Fatalf("expecting non-nil error") } @@ -88,7 +90,8 @@ func TestParseServiceListSuccess(t *testing.T) { ] } ` - objectsByKey, meta, err := parseServiceList([]byte(data)) + r := bytes.NewBufferString(data) + objectsByKey, meta, err := parseServiceList(r) if err != nil { t.Fatalf("unexpected error: %s", err) } From f6695315063f85e54217297d81e21804408a7730 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Mar 2021 00:48:28 +0200 Subject: [PATCH 26/59] lib/storage: further tune filters sorting logic --- lib/storage/index_db.go | 60 +++++++++++++++++++------------------- lib/storage/tag_filters.go | 4 +++ 2 files changed, 34 insertions(+), 30 deletions(-) diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go index 99711f00e..fff9c0d77 100644 --- a/lib/storage/index_db.go +++ b/lib/storage/index_db.go @@ -2386,7 +2386,7 @@ func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, filter *uint64set } // Slow path - scan for all the rows with the given prefix. - maxLoopsCount := uint64(maxMetrics) * maxIndexScanSlowLoopsPerMetric + maxLoopsCount := uint64(maxMetrics) * maxIndexScanLoopsPerMetric loopsCount, err := is.getMetricIDsForTagFilterSlow(tf, filter, maxLoopsCount, metricIDs.Add) if err != nil { if err == errFallbackToMetricNameMatch { @@ -2725,7 +2725,7 @@ func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set maxDate := uint64(tr.MaxTimestamp) / msecPerDay if maxDate < minDate { // Per-day inverted index doesn't cover the selected date range. 
- return errFallbackToMetricNameMatch + return fmt.Errorf("maxDate=%d cannot be smaller than minDate=%d", maxDate, minDate) } if maxDate-minDate > maxDaysForDateMetricIDs { // Too much dates must be covered. Give up, since it may be slow. @@ -2788,9 +2788,8 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter // This stats is usually collected from the previous queries. // This way we limit the amount of work below by applying fast filters at first. type tagFilterWithWeight struct { - tf *tagFilter - loopsCount uint64 - lastQueryTimestamp uint64 + tf *tagFilter + loopsCount uint64 } tfws := make([]tagFilterWithWeight, len(tfs.tfs)) currentTime := fasttime.UnixTimestamp() @@ -2798,26 +2797,29 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter tf := &tfs.tfs[i] loopsCount, lastQueryTimestamp := is.getLoopsCountAndTimestampForDateFilter(date, tf) origLoopsCount := loopsCount - if currentTime > lastQueryTimestamp+3*3600 { - // Update stats once per 3 hours only for relatively fast tag filters. - // There is no need in spending CPU resources on updating stats for slow tag filters. + if loopsCount == 0 && tf.looksLikeHeavy() { + // Set high loopsCount for heavy tag filters instead of spending CPU time on their execution. + loopsCount = 100e6 + is.storeLoopsCountForDateFilter(date, tf, loopsCount) + } + if currentTime > lastQueryTimestamp+3600 { + // Update stats once per hour for relatively fast tag filters. + // There is no need in spending CPU resources on updating stats for heavy tag filters. if loopsCount <= 10e6 { loopsCount = 0 } } if loopsCount == 0 { - // Prevent from possible thundering herd issue when heavy tf is executed from multiple concurrent queries + // Prevent from possible thundering herd issue when potentially heavy tf is executed from multiple concurrent queries // by temporary persisting its position in the tag filters list. if origLoopsCount == 0 { - origLoopsCount = 10e6 + origLoopsCount = 50e6 } - lastQueryTimestamp = 0 - is.storeLoopsCountForDateFilter(date, tf, origLoopsCount, lastQueryTimestamp) + is.storeLoopsCountForDateFilter(date, tf, origLoopsCount) } tfws[i] = tagFilterWithWeight{ - tf: tf, - loopsCount: loopsCount, - lastQueryTimestamp: lastQueryTimestamp, + tf: tf, + loopsCount: loopsCount, } } sort.Slice(tfws, func(i, j int) bool { @@ -2841,8 +2843,12 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter continue } m, loopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, nil, tfs.commonPrefix, maxDateMetrics) - is.storeLoopsCountForDateFilter(date, tf, loopsCount, tfw.lastQueryTimestamp) + is.storeLoopsCountForDateFilter(date, tf, loopsCount) if err != nil { + if err == errFallbackToMetricNameMatch { + tfsPostponed = append(tfsPostponed, tf) + continue + } return nil, err } if m.Len() >= maxDateMetrics { @@ -2896,12 +2902,16 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter // instead of scanning big number of entries in the inverted index for these filters. tfsPostponed = append(tfsPostponed, tf) // Store stats for non-executed tf, since it could be updated during protection from thundered herd. 
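The surrounding hunks maintain a per-`(date, tagFilter)` `loopsCount` statistic that orders filters from cheapest to most expensive, and the sentinel write mentioned in the comment above is the thundering-herd guard: when a filter has no recorded stats, a large `loopsCount` is persisted before execution so that concurrent queries postpone the same potentially heavy filter instead of all running it at once. A self-contained sketch of that guard (the cache and key shape are simplified stand-ins; `50e6` mirrors `origLoopsCount` in the diff):

```go
package main

import (
	"fmt"
	"sync"
)

// loopsCountCache stands in for the per-(date, tagFilter) stats cache in the diff.
var loopsCountCache sync.Map // key -> uint64

const heavySentinel = 50e6 // mirrors origLoopsCount = 50e6 in the diff

// claimFilter returns the known cost for key, persisting a large sentinel
// when no stats exist yet, so concurrent queries treat the filter as heavy
// and postpone it while the first caller measures it.
func claimFilter(key string) uint64 {
	if v, ok := loopsCountCache.Load(key); ok {
		return v.(uint64)
	}
	loopsCountCache.Store(key, uint64(heavySentinel))
	return 0 // unknown: the current query executes and measures it
}

// storeMeasuredCost records the real cost once the filter has been executed.
func storeMeasuredCost(key string, loopsCount uint64) {
	loopsCountCache.Store(key, loopsCount)
}

func main() {
	fmt.Println(claimFilter(`{path=~".+"}`)) // 0: first caller measures it
	fmt.Println(claimFilter(`{path=~".+"}`)) // 50000000: concurrent callers see it as heavy
	storeMeasuredCost(`{path=~".+"}`, 1234)
	fmt.Println(claimFilter(`{path=~".+"}`)) // 1234: measured cost from now on
}
```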
- is.storeLoopsCountForDateFilter(date, tf, tfw.loopsCount, tfw.lastQueryTimestamp) + is.storeLoopsCountForDateFilter(date, tf, tfw.loopsCount) continue } m, loopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, metricIDs, tfs.commonPrefix, maxDateMetrics) - is.storeLoopsCountForDateFilter(date, tf, loopsCount, tfw.lastQueryTimestamp) + is.storeLoopsCountForDateFilter(date, tf, loopsCount) if err != nil { + if err == errFallbackToMetricNameMatch { + tfsPostponed = append(tfsPostponed, tf) + continue + } return nil, err } if m.Len() >= maxDateMetrics { @@ -3115,13 +3125,8 @@ func (is *indexSearch) getLoopsCountAndTimestampForDateFilter(date uint64, tf *t return loopsCount, timestamp } -func (is *indexSearch) storeLoopsCountForDateFilter(date uint64, tf *tagFilter, loopsCount, prevTimestamp uint64) { +func (is *indexSearch) storeLoopsCountForDateFilter(date uint64, tf *tagFilter, loopsCount uint64) { currentTimestamp := fasttime.UnixTimestamp() - if currentTimestamp < prevTimestamp+5 { - // The cache already contains quite fresh entry for the current (date, tf). - // Do not update it too frequently. - return - } is.kb.B = appendDateTagFilterCacheKey(is.kb.B[:0], date, tf) kb := kbPool.Get() kb.B = encoding.MarshalUint64(kb.B[:0], loopsCount) @@ -3201,11 +3206,6 @@ func (is *indexSearch) updateMetricIDsForPrefix(prefix []byte, metricIDs *uint64 // over the found metrics. const maxIndexScanLoopsPerMetric = 100 -// The maximum number of slow index scan loops. -// Bigger number of loops is slower than updateMetricIDsByMetricNameMatch -// over the found metrics. -const maxIndexScanSlowLoopsPerMetric = 20 - func (is *indexSearch) intersectMetricIDsWithTagFilter(tf *tagFilter, filter *uint64set.Set) (*uint64set.Set, error) { if filter.Len() == 0 { return nil, nil @@ -3251,7 +3251,7 @@ func (is *indexSearch) intersectMetricIDsWithTagFilterNocache(tf *tagFilter, fil } // Slow path - scan for all the rows with the given prefix. 
- maxLoopsCount := uint64(filter.Len()) * maxIndexScanSlowLoopsPerMetric + maxLoopsCount := uint64(filter.Len()) * maxIndexScanLoopsPerMetric _, err := is.getMetricIDsForTagFilterSlow(tf, filter, maxLoopsCount, func(metricID uint64) { if tf.isNegative { // filter must be equal to metricIDs diff --git a/lib/storage/tag_filters.go b/lib/storage/tag_filters.go index 6f88b6e54..1a56c3fad 100644 --- a/lib/storage/tag_filters.go +++ b/lib/storage/tag_filters.go @@ -248,6 +248,10 @@ type tagFilter struct { graphiteReverseSuffix []byte } +func (tf *tagFilter) looksLikeHeavy() bool { + return tf.isRegexp && len(tf.orSuffixes) == 0 +} + func (tf *tagFilter) isComposite() bool { k := tf.key return len(k) > 0 && k[0] == compositeTagKeyPrefix From f3d712724c15fc27449f4e42beae917f702f68fb Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Mar 2021 01:22:51 +0200 Subject: [PATCH 27/59] docs/Articles.md: add https://www.sensedia.com/post/monitoring-with-prometheus-alertmanager --- docs/Articles.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/Articles.md b/docs/Articles.md index c6c8c645e..ca36b86b6 100644 --- a/docs/Articles.md +++ b/docs/Articles.md @@ -27,6 +27,7 @@ * [Observability, Availability & DORA’s Research Program](https://medium.com/alteos-tech-blog/observability-availability-and-doras-research-program-85deb6680e78) * [Tame Kubernetes Costs with Percona Monitoring and Management and Prometheus Operator](https://www.percona.com/blog/2021/02/12/tame-kubernetes-costs-with-percona-monitoring-and-management-and-prometheus-operator/) * [Prometheus Victoria Metrics On AWS ECS](https://dalefro.medium.com/prometheus-victoria-metrics-on-aws-ecs-62448e266090) +* [Monitoring with Prometheus, Grafana, AlertManager and VictoriaMetrics](https://www.sensedia.com/post/monitoring-with-prometheus-alertmanager) ## Our articles From fa448806a5ba4a1d4e1446beb5a20f5db4a35775 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Mar 2021 01:52:11 +0200 Subject: [PATCH 28/59] deployment/docker: update Go builder from 1.16.1 to 1.16.2 See https://github.com/golang/go/issues?q=milestone%3AGo1.16.2+label%3ACherryPickApproved --- deployment/docker/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/docker/Makefile b/deployment/docker/Makefile index 8b760435f..3fc7248a8 100644 --- a/deployment/docker/Makefile +++ b/deployment/docker/Makefile @@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics ROOT_IMAGE ?= alpine:3.13.2 CERTS_IMAGE := alpine:3.13.2 -GO_BUILDER_IMAGE := golang:1.16.1 +GO_BUILDER_IMAGE := golang:1.16.2 BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr : _) BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr : _)-$(shell echo $(CERTS_IMAGE) | tr : _) From a6a71ef861444eb11fe8ec6d2387f0fc0c4aea87 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Mar 2021 03:35:49 +0200 Subject: [PATCH 29/59] lib/promscrape: add ability to configure proxy options via `proxy_tls_config`, `proxy_basic_auth`, `proxy_bearer_token` and `proxy_bearer_token_file` Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1116 --- app/vmagent/README.md | 35 +++++++++++++++++++++++++++++++++++ docs/CHANGELOG.md | 1 + docs/vmagent.md | 35 +++++++++++++++++++++++++++++++++++ lib/promscrape/client.go | 2 +- lib/promscrape/config.go | 21 ++++++++++++++++----- lib/promscrape/config_test.go | 32 +++++++++++++++++++++++++++++++- lib/promscrape/scrapework.go | 12 ++++++++---- lib/proxy/proxy.go | 21 
+++++++++++++++++++++ 8 files changed, 148 insertions(+), 11 deletions(-) diff --git a/app/vmagent/README.md b/app/vmagent/README.md index 22aaa2360..2606690c5 100644 --- a/app/vmagent/README.md +++ b/app/vmagent/README.md @@ -255,6 +255,41 @@ If each target is scraped by multiple `vmagent` instances, then data deduplicati See [these docs](https://victoriametrics.github.io/#deduplication) for details. +## Scraping targets via a proxy + +`vmagent` supports scraping targets via http and https proxies. Proxy address must be specified in `proxy_url` option. For example, the following scrape config instructs +target scraping via https proxy at `https://proxy-addr:1234`: + +```yml +scrape_configs: +- job_name: foo + proxy_url: https://proxy-addr:1234 +``` + +Proxy can be configured with the following optional settings: + +* `proxy_bearer_token` and `proxy_bearer_token_file` for Bearer token authorization +* `proxy_basic_auth` for Basic authorization. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config). +* `proxy_tls_config` for TLS config. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config). + +For example: + +```yml +scrape_configs: +- job_name: foo + proxy_url: https://proxy-addr:1234 + proxy_basic_auth: + username: foobar + password: secret + proxy_tls_config: + insecure_skip_verify: true + cert_file: /path/to/cert + key_file: /path/to/key + ca_file: /path/to/ca + server_name: real-server-name +``` + + ## Monitoring `vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page. We recommend setting up regular scraping of this page diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index b574ded3d..3b34a78e1 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -10,6 +10,7 @@ * FEATURE: export `vm_available_memory_bytes` and `vm_available_cpu_cores` metrics, which show the number of available RAM and available CPU cores for VictoriaMetrics apps. * FEATURE: vmagent: add ability to replicate scrape targets among `vmagent` instances in the cluster with `-promscrape.cluster.replicationFactor` command-line flag. See [these docs](https://victoriametrics.github.io/vmagent.html#scraping-big-number-of-targets). * FATURE: vmagent: accept `scrape_offset` option at `scrape_config`. This option may be useful when scrapes must start at the specified offset of every scrape interval. See [these docs](https://victoriametrics.github.io/vmagent.html#troubleshooting) for details. +* FEATURE: vmagent: support `proxy_tls_config`, `proxy_basic_auth`, `proxy_bearer_token` and `proxy_bearer_token_file` options at `scrape_config` section for configuring proxies specified via `proxy_url`. See [these docs](https://victoriametrics.github.io/vmagent.html#scraping-targets-via-a-proxy). * FEATURE: vmauth: allow using regexp paths in `url_map`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1112) for details. * BUGFIX: vmagent: prevent from high CPU usage bug during failing scrapes with small `scrape_timeout` (less than a few seconds). diff --git a/docs/vmagent.md b/docs/vmagent.md index 22aaa2360..2606690c5 100644 --- a/docs/vmagent.md +++ b/docs/vmagent.md @@ -255,6 +255,41 @@ If each target is scraped by multiple `vmagent` instances, then data deduplicati See [these docs](https://victoriametrics.github.io/#deduplication) for details. +## Scraping targets via a proxy + +`vmagent` supports scraping targets via http and https proxies. 
Proxy address must be specified in `proxy_url` option. For example, the following scrape config instructs +target scraping via https proxy at `https://proxy-addr:1234`: + +```yml +scrape_configs: +- job_name: foo + proxy_url: https://proxy-addr:1234 +``` + +Proxy can be configured with the following optional settings: + +* `proxy_bearer_token` and `proxy_bearer_token_file` for Bearer token authorization +* `proxy_basic_auth` for Basic authorization. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config). +* `proxy_tls_config` for TLS config. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config). + +For example: + +```yml +scrape_configs: +- job_name: foo + proxy_url: https://proxy-addr:1234 + proxy_basic_auth: + username: foobar + password: secret + proxy_tls_config: + insecure_skip_verify: true + cert_file: /path/to/cert + key_file: /path/to/key + ca_file: /path/to/ca + server_name: real-server-name +``` + + ## Monitoring `vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page. We recommend setting up regular scraping of this page diff --git a/lib/promscrape/client.go b/lib/promscrape/client.go index 958c06055..f6a264802 100644 --- a/lib/promscrape/client.go +++ b/lib/promscrape/client.go @@ -67,7 +67,7 @@ func newClient(sw *ScrapeWork) *client { host += ":443" } } - dialFunc, err := newStatDialFunc(sw.ProxyURL, sw.AuthConfig) + dialFunc, err := newStatDialFunc(sw.ProxyURL, sw.ProxyAuthConfig) if err != nil { logger.Fatalf("cannot create dial func: %s", err) } diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go index 8d558862f..b637b8316 100644 --- a/lib/promscrape/config.go +++ b/lib/promscrape/config.go @@ -110,11 +110,15 @@ type ScrapeConfig struct { SampleLimit int `yaml:"sample_limit,omitempty"` // These options are supported only by lib/promscrape. 
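Alongside the documentation above, the `ScrapeConfig` struct below gains the four `proxy_*` fields. As a usage note, the token-based variants from the option list might be configured like this (a sketch; the file path is hypothetical, and normally only one of the two token options would be set):

```yml
scrape_configs:
- job_name: foo
  proxy_url: https://proxy-addr:1234
  # Either an inline token:
  # proxy_bearer_token: my-secret-token
  # ...or a file the token is read from:
  proxy_bearer_token_file: /var/run/secrets/proxy-token
```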
- DisableCompression bool `yaml:"disable_compression,omitempty"` - DisableKeepAlive bool `yaml:"disable_keepalive,omitempty"` - StreamParse bool `yaml:"stream_parse,omitempty"` - ScrapeAlignInterval time.Duration `yaml:"scrape_align_interval,omitempty"` - ScrapeOffset time.Duration `yaml:"scrape_offset,omitempty"` + DisableCompression bool `yaml:"disable_compression,omitempty"` + DisableKeepAlive bool `yaml:"disable_keepalive,omitempty"` + StreamParse bool `yaml:"stream_parse,omitempty"` + ScrapeAlignInterval time.Duration `yaml:"scrape_align_interval,omitempty"` + ScrapeOffset time.Duration `yaml:"scrape_offset,omitempty"` + ProxyTLSConfig *promauth.TLSConfig `yaml:"proxy_tls_config,omitempty"` + ProxyBasicAuth *promauth.BasicAuthConfig `yaml:"proxy_basic_auth,omitempty"` + ProxyBearerToken string `yaml:"proxy_bearer_token,omitempty"` + ProxyBearerTokenFile string `yaml:"proxy_bearer_token_file,omitempty"` // This is set in loadConfig swc *scrapeWorkConfig @@ -543,6 +547,10 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf if err != nil { return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %w", jobName, err) } + proxyAC, err := promauth.NewConfig(baseDir, sc.ProxyBasicAuth, sc.ProxyBearerToken, sc.ProxyBearerTokenFile, sc.ProxyTLSConfig) + if err != nil { + return nil, fmt.Errorf("cannot parse proxy auth config for `job_name` %q: %w", jobName, err) + } relabelConfigs, err := promrelabel.ParseRelabelConfigs(sc.RelabelConfigs) if err != nil { return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %w", jobName, err) @@ -559,6 +567,7 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf scheme: scheme, params: params, proxyURL: sc.ProxyURL, + proxyAuthConfig: proxyAC, authConfig: ac, honorLabels: honorLabels, honorTimestamps: honorTimestamps, @@ -583,6 +592,7 @@ type scrapeWorkConfig struct { scheme string params map[string][]string proxyURL proxy.URL + proxyAuthConfig *promauth.Config authConfig *promauth.Config honorLabels bool honorTimestamps bool @@ -849,6 +859,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel OriginalLabels: originalLabels, Labels: labels, ProxyURL: swc.proxyURL, + ProxyAuthConfig: swc.proxyAuthConfig, AuthConfig: swc.authConfig, MetricRelabelConfigs: swc.metricRelabelConfigs, SampleLimit: swc.sampleLimit, diff --git a/lib/promscrape/config_test.go b/lib/promscrape/config_test.go index f75dae834..ac7ce98fb 100644 --- a/lib/promscrape/config_test.go +++ b/lib/promscrape/config_test.go @@ -10,6 +10,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy" ) func TestNeedSkipScrapeWork(t *testing.T) { @@ -154,6 +155,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "blackbox", }} if !reflect.DeepEqual(sws, swsExpected) { @@ -548,6 +550,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "foo", }, { @@ -587,6 +590,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "foo", }, { @@ -626,6 +630,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "foo", }, }) @@ -679,6 +684,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: 
&promauth.Config{}, jobNameOriginal: "foo", }, }) @@ -729,6 +735,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "foo", }, }) @@ -748,6 +755,10 @@ scrape_configs: p: ["x&y", "="] xaa: bearer_token: xyz + proxy_url: http://foo.bar + proxy_basic_auth: + username: foo + password: bar static_configs: - targets: ["foo.bar", "aaa"] labels: @@ -801,6 +812,10 @@ scrape_configs: AuthConfig: &promauth.Config{ Authorization: "Bearer xyz", }, + ProxyAuthConfig: &promauth.Config{ + Authorization: "Basic Zm9vOmJhcg==", + }, + ProxyURL: proxy.MustNewURL("http://foo.bar"), jobNameOriginal: "foo", }, { @@ -842,6 +857,10 @@ scrape_configs: AuthConfig: &promauth.Config{ Authorization: "Bearer xyz", }, + ProxyAuthConfig: &promauth.Config{ + Authorization: "Basic Zm9vOmJhcg==", + }, + ProxyURL: proxy.MustNewURL("http://foo.bar"), jobNameOriginal: "foo", }, { @@ -877,6 +896,7 @@ scrape_configs: TLSServerName: "foobar", TLSInsecureSkipVerify: true, }, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "qwer", }, }) @@ -955,6 +975,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "foo", }, }) @@ -1017,6 +1038,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "foo", }, }) @@ -1060,6 +1082,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "foo", }, }) @@ -1099,7 +1122,8 @@ scrape_configs: Value: "foo", }, }, - AuthConfig: &promauth.Config{}, + AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, MetricRelabelConfigs: mustParseRelabelConfigs(` - source_labels: [foo] target_label: abc @@ -1145,6 +1169,7 @@ scrape_configs: AuthConfig: &promauth.Config{ Authorization: "Basic eHl6OnNlY3JldC1wYXNz", }, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "foo", }, }) @@ -1184,6 +1209,7 @@ scrape_configs: AuthConfig: &promauth.Config{ Authorization: "Bearer secret-pass", }, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "foo", }, }) @@ -1229,6 +1255,7 @@ scrape_configs: AuthConfig: &promauth.Config{ TLSCertificate: &snakeoilCert, }, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "foo", }, }) @@ -1291,6 +1318,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, jobNameOriginal: "aaa", }, }) @@ -1352,6 +1380,7 @@ scrape_configs: }, }, AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, SampleLimit: 100, DisableKeepAlive: true, DisableCompression: true, @@ -1398,6 +1427,7 @@ scrape_configs: }, jobNameOriginal: "path wo slash", AuthConfig: &promauth.Config{}, + ProxyAuthConfig: &promauth.Config{}, }, }) } diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go index 003b0348a..d19ae8b34 100644 --- a/lib/promscrape/scrapework.go +++ b/lib/promscrape/scrapework.go @@ -68,12 +68,15 @@ type ScrapeWork struct { // See also https://prometheus.io/docs/concepts/jobs_instances/ Labels []prompbmarshal.Label - // Auth config - AuthConfig *promauth.Config - // ProxyURL HTTP proxy url ProxyURL proxy.URL + // Auth config for ProxyUR: + ProxyAuthConfig *promauth.Config + + // Auth config + AuthConfig *promauth.Config + // Optional `metric_relabel_configs`. MetricRelabelConfigs *promrelabel.ParsedConfigs @@ -105,9 +108,10 @@ type ScrapeWork struct { func (sw *ScrapeWork) key() string { // Do not take into account OriginalLabels. 
key := fmt.Sprintf("ScrapeURL=%s, ScrapeInterval=%s, ScrapeTimeout=%s, HonorLabels=%v, HonorTimestamps=%v, Labels=%s, "+ - "AuthConfig=%s, MetricRelabelConfigs=%s, SampleLimit=%d, DisableCompression=%v, DisableKeepAlive=%v, StreamParse=%v, "+ + "ProxyURL=%s, ProxyAuthConfig=%s, AuthConfig=%s, MetricRelabelConfigs=%s, SampleLimit=%d, DisableCompression=%v, DisableKeepAlive=%v, StreamParse=%v, "+ "ScrapeAlignInterval=%s, ScrapeOffset=%s", sw.ScrapeURL, sw.ScrapeInterval, sw.ScrapeTimeout, sw.HonorLabels, sw.HonorTimestamps, sw.LabelsString(), + sw.ProxyURL.String(), sw.ProxyAuthConfig.String(), sw.AuthConfig.String(), sw.MetricRelabelConfigs.String(), sw.SampleLimit, sw.DisableCompression, sw.DisableKeepAlive, sw.StreamParse, sw.ScrapeAlignInterval, sw.ScrapeOffset) return key diff --git a/lib/proxy/proxy.go b/lib/proxy/proxy.go index 8915ba2e1..8582a83ae 100644 --- a/lib/proxy/proxy.go +++ b/lib/proxy/proxy.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth" "github.com/VictoriaMetrics/fasthttp" @@ -20,6 +21,17 @@ type URL struct { url *url.URL } +// MustNewURL returns new URL for the given u. +func MustNewURL(u string) URL { + pu, err := url.Parse(u) + if err != nil { + logger.Panicf("BUG: cannot parse u=%q: %s", u, err) + } + return URL{ + url: pu, + } +} + // URL return the underlying url. func (u *URL) URL() *url.URL { if u == nil || u.url == nil { @@ -28,6 +40,15 @@ func (u *URL) URL() *url.URL { return u.url } +// String returns string representation of u. +func (u *URL) String() string { + pu := u.URL() + if pu == nil { + return "" + } + return pu.String() +} + // MarshalYAML implements yaml.Marshaler interface. 
func (u *URL) MarshalYAML() (interface{}, error) { if u.url == nil { From 895d5d13550a23a2a1b9afeb9b23c7e5a3ba7562 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Mar 2021 10:40:55 +0200 Subject: [PATCH 30/59] lib/proxy: set proxy address in tls.Config.ServerName instead of the target address Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1116 --- lib/proxy/proxy.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/lib/proxy/proxy.go b/lib/proxy/proxy.go index 8582a83ae..8222b035f 100644 --- a/lib/proxy/proxy.go +++ b/lib/proxy/proxy.go @@ -93,19 +93,21 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) { if authHeader != "" { authHeader = "Proxy-Authorization: " + authHeader + "\r\n" } - tlsCfg := ac.NewTLSConfig() + var tlsCfg *tls.Config + if isTLS { + tlsCfg = ac.NewTLSConfig() + if !tlsCfg.InsecureSkipVerify && tlsCfg.ServerName == "" { + tlsCfg = tlsCfg.Clone() + tlsCfg.ServerName = tlsServerName(proxyAddr) + } + } dialFunc := func(addr string) (net.Conn, error) { proxyConn, err := defaultDialFunc(proxyAddr) if err != nil { return nil, fmt.Errorf("cannot connect to proxy %q: %w", pu.Redacted(), err) } if isTLS { - tlsCfgLocal := tlsCfg - if !tlsCfgLocal.InsecureSkipVerify && tlsCfgLocal.ServerName == "" { - tlsCfgLocal = tlsCfgLocal.Clone() - tlsCfgLocal.ServerName = tlsServerName(addr) - } - proxyConn = tls.Client(proxyConn, tlsCfgLocal) + proxyConn = tls.Client(proxyConn, tlsCfg) } conn, err := sendConnectRequest(proxyConn, proxyAddr, addr, authHeader) if err != nil { From ca4d5ce037b01676ab661793d0da6109e0098bc5 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Mar 2021 10:46:45 +0200 Subject: [PATCH 31/59] lib/proxy: there is no need in cloning tlsCfg, which has been created two lines above --- lib/proxy/proxy.go | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/proxy/proxy.go b/lib/proxy/proxy.go index 8222b035f..097fed222 100644 --- a/lib/proxy/proxy.go +++ b/lib/proxy/proxy.go @@ -97,7 +97,6 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) { if isTLS { tlsCfg = ac.NewTLSConfig() if !tlsCfg.InsecureSkipVerify && tlsCfg.ServerName == "" { - tlsCfg = tlsCfg.Clone() tlsCfg.ServerName = tlsServerName(proxyAddr) } } From def014eb75258577d273b7b11f11b5e1328dc165 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Mar 2021 11:21:20 +0200 Subject: [PATCH 32/59] lib/promscrape/discovery/kubernetes: remove debug lines left after the commit 133b288681978d82cd3a4c6125824346b9344f9f --- lib/promscrape/discovery/kubernetes/api_watcher.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/promscrape/discovery/kubernetes/api_watcher.go b/lib/promscrape/discovery/kubernetes/api_watcher.go index 26e407690..89984741b 100644 --- a/lib/promscrape/discovery/kubernetes/api_watcher.go +++ b/lib/promscrape/discovery/kubernetes/api_watcher.go @@ -207,11 +207,9 @@ func getURLWatcher(role, apiURL string, proxyURL *url.URL, ac *promauth.Config) key := fmt.Sprintf("url=%s, proxyURL=%v, authConfig=%s", apiURL, proxyURL, ac.String()) urlWatchersLock.Lock() uw := urlWatchers[key] - logger.Infof("found watcher for key=%s", key) if uw == nil { uw = newURLWatcher(role, apiURL, proxyURL, ac) urlWatchers[key] = uw - logger.Infof("registered watcher for key=%s", key) } urlWatchersLock.Unlock() return uw From 43552fa8d3d0d21bfd66526c49cbfd77e2807031 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Mar 2021 11:45:10 +0200 Subject: [PATCH 
33/59] docs/CaseStudies.md: fix incorrect number of active time series for Zhihu --- docs/CaseStudies.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/CaseStudies.md b/docs/CaseStudies.md index c9c9a8e01..6b54ef38a 100644 --- a/docs/CaseStudies.md +++ b/docs/CaseStudies.md @@ -338,7 +338,7 @@ Please see [Monitoring K8S with VictoriaMetrics](https://docs.google.com/present Numbers: -- Active time series: ~2500 Million +- Active time series: ~25 Million - Datapoints: ~20 Trillion - Ingestion rate: ~1800k/s - Disk usage: ~20 TB From 7c002023d78cbbe239648d167322d12f4f91e78f Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Mar 2021 12:16:50 +0200 Subject: [PATCH 34/59] app/vmselect/prometheus: do not include datapoints with timestamps matching `t-d` when returning results from `/api/v1/query?query=m[d]&time=t` as Prometheus does --- app/vmselect/prometheus/prometheus.go | 5 +++++ docs/CHANGELOG.md | 1 + 2 files changed, 6 insertions(+) diff --git a/app/vmselect/prometheus/prometheus.go b/app/vmselect/prometheus/prometheus.go index 3e7799e8d..937528269 100644 --- a/app/vmselect/prometheus/prometheus.go +++ b/app/vmselect/prometheus/prometheus.go @@ -968,6 +968,11 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e start -= offset end := start start = end - window + // Do not includ data point with a timestamp matching the lower boundary of the window as Prometheus does. + start++ + if end < start { + end = start + } if err := exportHandler(w, []string{childQuery}, etf, start, end, "promapi", 0, false, deadline); err != nil { return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err) } diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 3b34a78e1..4442b209b 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -20,6 +20,7 @@ * BUGFIX: vmagent: properly scrape targets via https proxy specified in `proxy_url` if `insecure_skip_verify` flag isn't set in `tls_config` section. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1116 * BUGFUX: avoid `duplicate time series` error if `prometheus_buckets()` covers a time range with distinct set of buckets. * BUGFIX: prevent exponent overflow when processing extremely small values close to zero such as `2.964393875E-314`. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1114 +* BUGFIX: do not include datapoints with a timestamp `t-d` when returning results from `/api/v1/query?query=m[d]&time=t` as Prometheus does. # [v1.55.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.55.1) From 2096c6e4642f40d416f873fbdb48f8c925555881 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Mar 2021 12:19:05 +0200 Subject: [PATCH 35/59] app/vmselect/prometheus: typo fix after 7c002023d78cbbe239648d167322d12f4f91e78f --- app/vmselect/prometheus/prometheus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/vmselect/prometheus/prometheus.go b/app/vmselect/prometheus/prometheus.go index 937528269..93f5f3a8c 100644 --- a/app/vmselect/prometheus/prometheus.go +++ b/app/vmselect/prometheus/prometheus.go @@ -968,7 +968,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e start -= offset end := start start = end - window - // Do not includ data point with a timestamp matching the lower boundary of the window as Prometheus does. 
+ // Do not include data point with a timestamp matching the lower boundary of the window as Prometheus does. start++ if end < start { end = start From 7a16e8e3a27ae0d4c4abbc46209b6f0fce8dffab Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 13 Mar 2021 15:18:47 +0200 Subject: [PATCH 36/59] lib/promscrape/discovery: fixes after 133b288681978d82cd3a4c6125824346b9344f9f - Removed a deadlock in addAPIWatcher - Do not create unused ScrapeWork objects - Do not spend CPU resources on creating objectByKey map in addAPIWatcher This work is based on https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1125 --- .../discovery/kubernetes/api_watcher.go | 40 +++++++++++-------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/lib/promscrape/discovery/kubernetes/api_watcher.go b/lib/promscrape/discovery/kubernetes/api_watcher.go index 89984741b..7f621f21a 100644 --- a/lib/promscrape/discovery/kubernetes/api_watcher.go +++ b/lib/promscrape/discovery/kubernetes/api_watcher.go @@ -132,7 +132,7 @@ func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss [] // getScrapeWorkObjects returns all the ScrapeWork objects for the given aw. func (aw *apiWatcher) getScrapeWorkObjects() []interface{} { - aw.startWatchersForRole(aw.sdc.Role) + aw.startWatchersForRole(aw.sdc.Role, true) aw.swosByKeyLock.Lock() defer aw.swosByKeyLock.Unlock() @@ -154,7 +154,7 @@ func (aw *apiWatcher) getObjectByRole(role, namespace, name string) object { return nil } key := namespace + "/" + name - aw.startWatchersForRole(role) + aw.startWatchersForRole(role, false) aw.watchersByURLLock.Lock() defer aw.watchersByURLLock.Unlock() @@ -172,15 +172,15 @@ func (aw *apiWatcher) getObjectByRole(role, namespace, name string) object { return nil } -func (aw *apiWatcher) startWatchersForRole(role string) { +func (aw *apiWatcher) startWatchersForRole(role string, registerAPIWatcher bool) { paths := getAPIPaths(role, aw.sdc.Namespaces.Names, aw.sdc.Selectors) for _, path := range paths { apiURL := aw.apiServer + path - aw.startWatcherForURL(role, apiURL) + aw.startWatcherForURL(role, apiURL, registerAPIWatcher) } } -func (aw *apiWatcher) startWatcherForURL(role, apiURL string) { +func (aw *apiWatcher) startWatcherForURL(role, apiURL string, registerAPIWatcher bool) { aw.watchersByURLLock.Lock() if aw.watchersByURL[apiURL] != nil { // Watcher for the given path already exists. 
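Stepping back to the `QueryHandler` change a few hunks above: with the `start++` adjustment, `/api/v1/query?query=m[d]&time=t` selects the half-open interval `(t-d, t]` instead of `[t-d, t]`, so a sample lying exactly at `t-d` is no longer returned, matching Prometheus. A small worked example of the boundary arithmetic (timestamps are illustrative; the real handler also applies `offset` first):

```go
package main

import "fmt"

// windowBounds mirrors the boundary arithmetic in QueryHandler:
// given time=t and a lookback window d, return the inclusive range to export.
func windowBounds(t, d int64) (start, end int64) {
	end = t
	start = end - d
	// Exclude the sample sitting exactly at t-d, as Prometheus does.
	start++
	if end < start {
		end = start
	}
	return start, end
}

func main() {
	start, end := windowBounds(1_600_000_300_000, 300_000) // t, d=5m in ms
	fmt.Println(start, end) // 1600000000001 1600000300000: the point at t-d is excluded
}
```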
@@ -188,16 +188,21 @@ func (aw *apiWatcher) startWatcherForURL(role, apiURL string) { return } uw := getURLWatcher(role, apiURL, aw.sdc.ProxyURL.URL(), aw.ac) - uw.addAPIWatcher(aw) aw.watchersByURL[apiURL] = uw aw.watchersByURLLock.Unlock() + uw.initOnce() + if registerAPIWatcher { + uw.addAPIWatcher(aw) + } aw.wg.Add(1) go func() { defer aw.wg.Done() <-aw.stopCh + if registerAPIWatcher { + uw.removeAPIWatcher(aw) + } aw.watchersByURLLock.Lock() - uw.removeAPIWatcher(aw) delete(aw.watchersByURL, apiURL) aw.watchersByURLLock.Unlock() }() @@ -230,6 +235,9 @@ type urlWatcher struct { parseObject parseObjectFunc parseObjectList parseObjectListFunc + // once is used for initializing the urlWatcher only once + once sync.Once + // mu protects aws, objectsByKey and resourceVersion mu sync.Mutex @@ -280,24 +288,24 @@ func newURLWatcher(role, apiURL string, proxyURL *url.URL, ac *promauth.Config) objectsRemoved: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects_removed_total{role=%q}`, role)), objectsUpdated: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects_updated_total{role=%q}`, role)), } - uw.reloadObjects() - go uw.watchForUpdates() return uw } +func (uw *urlWatcher) initOnce() { + uw.once.Do(func() { + uw.reloadObjects() + go uw.watchForUpdates() + }) +} + func (uw *urlWatcher) addAPIWatcher(aw *apiWatcher) { uw.mu.Lock() if _, ok := uw.aws[aw]; ok { logger.Panicf("BUG: aw=%p has been already added", aw) } uw.aws[aw] = struct{}{} - objectsByKey := make(map[string]object) - for key, o := range uw.objectsByKey { - objectsByKey[key] = o - } + aw.reloadScrapeWorks(uw.objectsByKey) uw.mu.Unlock() - - aw.reloadScrapeWorks(objectsByKey) } func (uw *urlWatcher) removeAPIWatcher(aw *apiWatcher) { @@ -425,7 +433,7 @@ func (uw *urlWatcher) watchForUpdates() { } resp, err := uw.doRequest(requestURL) if err != nil { - logger.Errorf("cannot performing request to %q: %s", requestURL, err) + logger.Errorf("cannot perform request to %q: %s", requestURL, err) backoffSleep() continue } From d4098985159ff78cc5ac67a33e84b02bd317b538 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sun, 14 Mar 2021 21:10:35 +0200 Subject: [PATCH 37/59] lib/promscrape/discovery/kubernetes: further optimize kubernetes service discovery for the case with many scrape jobs Do not re-calculate labels per each scrape job - reuse them instead for scrape jobs with identical Kubernetes role Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113 --- .../discovery/kubernetes/api_watcher.go | 433 ++++++++++-------- .../discovery/kubernetes/endpoints.go | 12 +- .../discovery/kubernetes/endpointslices.go | 6 +- .../discovery/kubernetes/ingress.go | 2 +- lib/promscrape/discovery/kubernetes/node.go | 2 +- lib/promscrape/discovery/kubernetes/pod.go | 2 +- .../discovery/kubernetes/service.go | 2 +- 7 files changed, 262 insertions(+), 197 deletions(-) diff --git a/lib/promscrape/discovery/kubernetes/api_watcher.go b/lib/promscrape/discovery/kubernetes/api_watcher.go index 7f621f21a..1e09e8955 100644 --- a/lib/promscrape/discovery/kubernetes/api_watcher.go +++ b/lib/promscrape/discovery/kubernetes/api_watcher.go @@ -33,7 +33,7 @@ type WatchEvent struct { // object is any Kubernetes object. type object interface { key() string - getTargetLabels(aw *apiWatcher) []map[string]string + getTargetLabels(gw *groupWatcher) []map[string]string } // parseObjectFunc must parse object from the given data. 
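The `initOnce` restructuring in the preceding patch is what removes the deadlock mentioned in its commit message: the registry lock now only guards the map itself, while the one-time initialization and subscriber registration run after unlocking, with `sync.Once` preserving the run-exactly-once guarantee. A generic sketch of the pattern (the names here are illustrative, not the real ones):

```go
package main

import (
	"fmt"
	"sync"
)

type watcher struct {
	once sync.Once
	url  string
}

// initOnce performs the one-time startup work (initial list + watch goroutine
// in the real code) without requiring the caller to hold any registry lock.
func (w *watcher) initOnce() {
	w.once.Do(func() {
		fmt.Println("initialized", w.url) // stands in for reloadObjects + watchForUpdates
	})
}

var (
	mu       sync.Mutex
	watchers = make(map[string]*watcher)
)

func getWatcher(url string) *watcher {
	// Only the map access happens under the lock...
	mu.Lock()
	w := watchers[url]
	if w == nil {
		w = &watcher{url: url}
		watchers[url] = w
	}
	mu.Unlock()
	// ...while the potentially slow (or re-entrant) init runs outside it,
	// which is what removes the deadlock.
	w.initOnce()
	return w
}

func main() {
	getWatcher("https://k8s/api/v1/pods")
	getWatcher("https://k8s/api/v1/pods") // init runs only once
}
```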
@@ -44,59 +44,42 @@ type parseObjectListFunc func(r io.Reader) (map[string]object, ListMeta, error) // apiWatcher is used for watching for Kuberntes object changes and caching their latest states. type apiWatcher struct { - // Kubenetes API server address in the form http://api-server - apiServer string - - // ac contains auth config for communicating with apiServer - ac *promauth.Config - - // sdc contains the related SDConfig - sdc *SDConfig + role string // Constructor for creating ScrapeWork objects from labels swcFunc ScrapeWorkConstructorFunc + gw *groupWatcher + // swos contains a map of ScrapeWork objects for the given apiWatcher swosByKey map[string][]interface{} swosByKeyLock sync.Mutex - // a map of watchers keyed by request urls - watchersByURL map[string]*urlWatcher - watchersByURLLock sync.Mutex - - stopCh chan struct{} - wg sync.WaitGroup + swosCount *metrics.Counter } func newAPIWatcher(apiServer string, ac *promauth.Config, sdc *SDConfig, swcFunc ScrapeWorkConstructorFunc) *apiWatcher { + namespaces := sdc.Namespaces.Names + selectors := sdc.Selectors + proxyURL := sdc.ProxyURL.URL() + gw := getGroupWatcher(apiServer, ac, namespaces, selectors, proxyURL) return &apiWatcher{ - apiServer: apiServer, - ac: ac, - sdc: sdc, + role: sdc.Role, swcFunc: swcFunc, - - swosByKey: make(map[string][]interface{}), - watchersByURL: make(map[string]*urlWatcher), - - stopCh: make(chan struct{}), + gw: gw, + swosByKey: make(map[string][]interface{}), + swosCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_scrape_works{role=%q}`, sdc.Role)), } } func (aw *apiWatcher) mustStop() { - close(aw.stopCh) - aw.wg.Wait() + aw.gw.unsubscribeAPIWatcher(aw) + aw.reloadScrapeWorks(make(map[string][]interface{})) } -func (aw *apiWatcher) reloadScrapeWorks(objectsByKey map[string]object) { - swosByKey := make(map[string][]interface{}) - for key, o := range objectsByKey { - labels := o.getTargetLabels(aw) - swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels) - if len(swos) > 0 { - swosByKey[key] = swos - } - } +func (aw *apiWatcher) reloadScrapeWorks(swosByKey map[string][]interface{}) { aw.swosByKeyLock.Lock() + aw.swosCount.Add(len(swosByKey) - len(aw.swosByKey)) aw.swosByKey = swosByKey aw.swosByKeyLock.Unlock() } @@ -105,8 +88,10 @@ func (aw *apiWatcher) setScrapeWorks(key string, labels []map[string]string) { swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels) aw.swosByKeyLock.Lock() if len(swos) > 0 { + aw.swosCount.Add(len(swos) - len(aw.swosByKey[key])) aw.swosByKey[key] = swos } else { + aw.swosCount.Add(-len(aw.swosByKey[key])) delete(aw.swosByKey, key) } aw.swosByKeyLock.Unlock() @@ -114,6 +99,7 @@ func (aw *apiWatcher) setScrapeWorks(key string, labels []map[string]string) { func (aw *apiWatcher) removeScrapeWorks(key string) { aw.swosByKeyLock.Lock() + aw.swosCount.Add(-len(aw.swosByKey[key])) delete(aw.swosByKey, key) aw.swosByKeyLock.Unlock() } @@ -132,7 +118,7 @@ func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss [] // getScrapeWorkObjects returns all the ScrapeWork objects for the given aw. func (aw *apiWatcher) getScrapeWorkObjects() []interface{} { - aw.startWatchersForRole(aw.sdc.Role, true) + aw.gw.startWatchersForRole(aw.role, aw) aw.swosByKeyLock.Lock() defer aw.swosByKeyLock.Unlock() @@ -147,115 +133,20 @@ func (aw *apiWatcher) getScrapeWorkObjects() []interface{} { return swos } -// getObjectByRole returns an object with the given (namespace, name) key and the given role. 
-func (aw *apiWatcher) getObjectByRole(role, namespace, name string) object { - if aw == nil { - // this is needed for testing - return nil - } - key := namespace + "/" + name - aw.startWatchersForRole(role, false) - aw.watchersByURLLock.Lock() - defer aw.watchersByURLLock.Unlock() - - for _, uw := range aw.watchersByURL { - if uw.role != role { - continue - } - uw.mu.Lock() - o := uw.objectsByKey[key] - uw.mu.Unlock() - if o != nil { - return o - } - } - return nil -} - -func (aw *apiWatcher) startWatchersForRole(role string, registerAPIWatcher bool) { - paths := getAPIPaths(role, aw.sdc.Namespaces.Names, aw.sdc.Selectors) - for _, path := range paths { - apiURL := aw.apiServer + path - aw.startWatcherForURL(role, apiURL, registerAPIWatcher) - } -} - -func (aw *apiWatcher) startWatcherForURL(role, apiURL string, registerAPIWatcher bool) { - aw.watchersByURLLock.Lock() - if aw.watchersByURL[apiURL] != nil { - // Watcher for the given path already exists. - aw.watchersByURLLock.Unlock() - return - } - uw := getURLWatcher(role, apiURL, aw.sdc.ProxyURL.URL(), aw.ac) - aw.watchersByURL[apiURL] = uw - aw.watchersByURLLock.Unlock() - uw.initOnce() - if registerAPIWatcher { - uw.addAPIWatcher(aw) - } - - aw.wg.Add(1) - go func() { - defer aw.wg.Done() - <-aw.stopCh - if registerAPIWatcher { - uw.removeAPIWatcher(aw) - } - aw.watchersByURLLock.Lock() - delete(aw.watchersByURL, apiURL) - aw.watchersByURLLock.Unlock() - }() -} - -func getURLWatcher(role, apiURL string, proxyURL *url.URL, ac *promauth.Config) *urlWatcher { - key := fmt.Sprintf("url=%s, proxyURL=%v, authConfig=%s", apiURL, proxyURL, ac.String()) - urlWatchersLock.Lock() - uw := urlWatchers[key] - if uw == nil { - uw = newURLWatcher(role, apiURL, proxyURL, ac) - urlWatchers[key] = uw - } - urlWatchersLock.Unlock() - return uw -} - -var ( - urlWatchersLock sync.Mutex - urlWatchers = make(map[string]*urlWatcher) -) - -// urlWatcher watches for an apiURL and updates object states in objectsByKey. -type urlWatcher struct { - role string - apiURL string +// groupWatcher watches for Kubernetes objects on the given apiServer with the given namespaces, +// selectors and authorization using the given client. 
+type groupWatcher struct { + apiServer string + namespaces []string + selectors []Selector authorization string client *http.Client - parseObject parseObjectFunc - parseObjectList parseObjectListFunc - - // once is used for initializing the urlWatcher only once - once sync.Once - - // mu protects aws, objectsByKey and resourceVersion mu sync.Mutex - - // aws contains registered apiWatcher objects - aws map[*apiWatcher]struct{} - - // objectsByKey contains the latest state for objects obtained from apiURL - objectsByKey map[string]object - - resourceVersion string - - objectsCount *metrics.Counter - objectsAdded *metrics.Counter - objectsRemoved *metrics.Counter - objectsUpdated *metrics.Counter + m map[string]*urlWatcher } -func newURLWatcher(role, apiURL string, proxyURL *url.URL, ac *promauth.Config) *urlWatcher { +func newGroupWatcher(apiServer string, ac *promauth.Config, namespaces []string, selectors []Selector, proxyURL *url.URL) *groupWatcher { var proxy func(*http.Request) (*url.URL, error) if proxyURL != nil { proxy = http.ProxyURL(proxyURL) @@ -269,18 +160,173 @@ func newURLWatcher(role, apiURL string, proxyURL *url.URL, ac *promauth.Config) }, Timeout: *apiServerTimeout, } + return &groupWatcher{ + apiServer: apiServer, + authorization: ac.Authorization, + namespaces: namespaces, + selectors: selectors, + client: client, + m: make(map[string]*urlWatcher), + } +} + +func getGroupWatcher(apiServer string, ac *promauth.Config, namespaces []string, selectors []Selector, proxyURL *url.URL) *groupWatcher { + key := fmt.Sprintf("apiServer=%s, namespaces=%s, selectors=%s, proxyURL=%v, authConfig=%s", + apiServer, namespaces, selectorsKey(selectors), proxyURL, ac.String()) + groupWatchersLock.Lock() + gw := groupWatchers[key] + if gw == nil { + gw = newGroupWatcher(apiServer, ac, namespaces, selectors, proxyURL) + groupWatchers[key] = gw + } + groupWatchersLock.Unlock() + return gw +} + +func selectorsKey(selectors []Selector) string { + var sb strings.Builder + for _, s := range selectors { + fmt.Fprintf(&sb, "{role=%q, label=%q, field=%q}", s.Role, s.Label, s.Field) + } + return sb.String() +} + +var ( + groupWatchersLock sync.Mutex + groupWatchers = make(map[string]*groupWatcher) + + _ = metrics.NewGauge(`vm_promscrape_discovery_kubernetes_group_watchers`, func() float64 { + groupWatchersLock.Lock() + n := len(groupWatchers) + groupWatchersLock.Unlock() + return float64(n) + }) +) + +// getObjectByRole returns an object with the given (namespace, name) key and the given role. 
+func (gw *groupWatcher) getObjectByRole(role, namespace, name string) object { + if gw == nil { + // this is needed for testing + return nil + } + key := namespace + "/" + name + gw.startWatchersForRole(role, nil) + gw.mu.Lock() + defer gw.mu.Unlock() + + for _, uw := range gw.m { + if uw.role != role { + continue + } + uw.mu.Lock() + o := uw.objectsByKey[key] + uw.mu.Unlock() + if o != nil { + return o + } + } + return nil +} + +func (gw *groupWatcher) startWatchersForRole(role string, aw *apiWatcher) { + paths := getAPIPaths(role, gw.namespaces, gw.selectors) + for _, path := range paths { + apiURL := gw.apiServer + path + gw.mu.Lock() + uw := gw.m[apiURL] + if uw == nil { + uw = newURLWatcher(role, apiURL, gw) + gw.m[apiURL] = uw + } + gw.mu.Unlock() + uw.subscribeAPIWatcher(aw) + } +} + +func (gw *groupWatcher) reloadScrapeWorksForAPIWatchers(aws []*apiWatcher, objectsByKey map[string]object) { + if len(aws) == 0 { + return + } + swosByKey := make([]map[string][]interface{}, len(aws)) + for i := range aws { + swosByKey[i] = make(map[string][]interface{}) + } + for key, o := range objectsByKey { + labels := o.getTargetLabels(gw) + for i, aw := range aws { + swos := getScrapeWorkObjectsForLabels(aw.swcFunc, labels) + if len(swos) > 0 { + swosByKey[i][key] = swos + } + } + } + for i, aw := range aws { + aw.reloadScrapeWorks(swosByKey[i]) + } +} + +// doRequest performs http request to the given requestURL. +func (gw *groupWatcher) doRequest(requestURL string) (*http.Response, error) { + req, err := http.NewRequest("GET", requestURL, nil) + if err != nil { + logger.Fatalf("cannot create a request for %q: %s", requestURL, err) + } + if gw.authorization != "" { + req.Header.Set("Authorization", gw.authorization) + } + return gw.client.Do(req) +} + +func (gw *groupWatcher) unsubscribeAPIWatcher(aw *apiWatcher) { + gw.mu.Lock() + for _, uw := range gw.m { + uw.unsubscribeAPIWatcher(aw) + } + gw.mu.Unlock() +} + +// urlWatcher watches for an apiURL and updates object states in objectsByKey. 
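`getGroupWatcher` above is the piece that lets every scrape job with identical (apiServer, namespaces, selectors, proxyURL, auth) settings share one set of Kubernetes watchers rather than opening its own API connections. The core pattern is a mutex-guarded keyed singleton; a stripped-down sketch (the struct is reduced to a key field for illustration):

```go
package main

import (
	"fmt"
	"sync"
)

type groupWatcher struct {
	key string
	// ... watchers, HTTP client, etc. would live here ...
}

var (
	groupWatchersLock sync.Mutex
	groupWatchers     = make(map[string]*groupWatcher)
)

// getGroupWatcher returns the shared watcher for key, creating it on first use.
// Concurrent callers with the same key always receive the same instance.
func getGroupWatcher(key string) *groupWatcher {
	groupWatchersLock.Lock()
	defer groupWatchersLock.Unlock()
	gw := groupWatchers[key]
	if gw == nil {
		gw = &groupWatcher{key: key}
		groupWatchers[key] = gw
	}
	return gw
}

func main() {
	a := getGroupWatcher("apiServer=https://k8s, namespaces=[default]")
	b := getGroupWatcher("apiServer=https://k8s, namespaces=[default]")
	fmt.Println(a == b) // true: one watcher per distinct config
}
```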
+type urlWatcher struct { + role string + apiURL string + gw *groupWatcher + + parseObject parseObjectFunc + parseObjectList parseObjectListFunc + + // mu protects aws, awsPending, objectsByKey and resourceVersion + mu sync.Mutex + + // aws contains registered apiWatcher objects + aws map[*apiWatcher]struct{} + + // awsPending contains pending apiWatcher objects, which must be moved to aws in a batch + awsPending map[*apiWatcher]struct{} + + // objectsByKey contains the latest state for objects obtained from apiURL + objectsByKey map[string]object + + resourceVersion string + + objectsCount *metrics.Counter + objectsAdded *metrics.Counter + objectsRemoved *metrics.Counter + objectsUpdated *metrics.Counter +} + +func newURLWatcher(role, apiURL string, gw *groupWatcher) *urlWatcher { parseObject, parseObjectList := getObjectParsersForRole(role) metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_url_watchers{role=%q}`, role)).Inc() uw := &urlWatcher{ - role: role, - apiURL: apiURL, - authorization: ac.Authorization, - client: client, + role: role, + apiURL: apiURL, + gw: gw, parseObject: parseObject, parseObjectList: parseObjectList, aws: make(map[*apiWatcher]struct{}), + awsPending: make(map[*apiWatcher]struct{}), objectsByKey: make(map[string]object), objectsCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects{role=%q}`, role)), @@ -288,45 +334,65 @@ func newURLWatcher(role, apiURL string, proxyURL *url.URL, ac *promauth.Config) objectsRemoved: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects_removed_total{role=%q}`, role)), objectsUpdated: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_objects_updated_total{role=%q}`, role)), } + logger.Infof("started %s watcher for %q", uw.role, uw.apiURL) + go uw.watchForUpdates() + go uw.processPendingSubscribers() return uw } -func (uw *urlWatcher) initOnce() { - uw.once.Do(func() { - uw.reloadObjects() - go uw.watchForUpdates() - }) -} - -func (uw *urlWatcher) addAPIWatcher(aw *apiWatcher) { - uw.mu.Lock() - if _, ok := uw.aws[aw]; ok { - logger.Panicf("BUG: aw=%p has been already added", aw) +func (uw *urlWatcher) subscribeAPIWatcher(aw *apiWatcher) { + if aw == nil { + return } - uw.aws[aw] = struct{}{} - aw.reloadScrapeWorks(uw.objectsByKey) - uw.mu.Unlock() -} - -func (uw *urlWatcher) removeAPIWatcher(aw *apiWatcher) { uw.mu.Lock() if _, ok := uw.aws[aw]; !ok { - logger.Panicf("BUG: aw=%p is missing", aw) + if _, ok := uw.awsPending[aw]; !ok { + uw.awsPending[aw] = struct{}{} + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscibers{role=%q,type="pending"}`, uw.role)).Inc() + } } - delete(uw.aws, aw) uw.mu.Unlock() } -// doRequest performs http request to the given requestURL. 
-func (uw *urlWatcher) doRequest(requestURL string) (*http.Response, error) { - req, err := http.NewRequest("GET", requestURL, nil) - if err != nil { - logger.Fatalf("cannot create a request for %q: %s", requestURL, err) +func (uw *urlWatcher) unsubscribeAPIWatcher(aw *apiWatcher) { + uw.mu.Lock() + if _, ok := uw.aws[aw]; ok { + delete(uw.aws, aw) + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscibers{role=%q,type="permanent"}`, uw.role)).Dec() + } else if _, ok := uw.awsPending[aw]; ok { + delete(uw.awsPending, aw) + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscibers{role=%q,type="pending"}`, uw.role)).Dec() } - if uw.authorization != "" { - req.Header.Set("Authorization", uw.authorization) + uw.mu.Unlock() +} + +func (uw *urlWatcher) processPendingSubscribers() { + t := time.NewTicker(time.Second) + for range t.C { + var awsPending []*apiWatcher + var objectsByKey map[string]object + + uw.mu.Lock() + if len(uw.awsPending) > 0 { + awsPending = getAPIWatchers(uw.awsPending) + for _, aw := range awsPending { + if _, ok := uw.aws[aw]; ok { + logger.Panicf("BUG: aw=%p already exists in uw.aws", aw) + } + uw.aws[aw] = struct{}{} + delete(uw.awsPending, aw) + } + objectsByKey = make(map[string]object, len(uw.objectsByKey)) + for key, o := range uw.objectsByKey { + objectsByKey[key] = o + } + } + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscibers{role=%q,type="pending"}`, uw.role)).Add(-len(awsPending)) + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_subscibers{role=%q,type="permanent"}`, uw.role)).Add(len(awsPending)) + uw.mu.Unlock() + + uw.gw.reloadScrapeWorksForAPIWatchers(awsPending, objectsByKey) } - return uw.client.Do(req) } func (uw *urlWatcher) setResourceVersion(resourceVersion string) { @@ -346,7 +412,7 @@ func (uw *urlWatcher) reloadObjects() string { } requestURL := uw.apiURL - resp, err := uw.doRequest(requestURL) + resp, err := uw.gw.doRequest(requestURL) if err != nil { logger.Errorf("cannot perform request to %q: %s", requestURL, err) return "" @@ -386,22 +452,19 @@ func (uw *urlWatcher) reloadObjects() string { uw.objectsAdded.Add(added) uw.objectsCount.Add(added - removed) uw.resourceVersion = metadata.ResourceVersion + aws := getAPIWatchers(uw.aws) uw.mu.Unlock() - for _, aw := range uw.getAPIWatchers() { - aw.reloadScrapeWorks(objectsByKey) - } - logger.Infof("loaded %d objects from %q", len(objectsByKey), requestURL) + uw.gw.reloadScrapeWorksForAPIWatchers(aws, objectsByKey) + logger.Infof("reloaded %d objects from %q", len(objectsByKey), requestURL) return metadata.ResourceVersion } -func (uw *urlWatcher) getAPIWatchers() []*apiWatcher { - uw.mu.Lock() - aws := make([]*apiWatcher, 0, len(uw.aws)) - for aw := range uw.aws { +func getAPIWatchers(awsMap map[*apiWatcher]struct{}) []*apiWatcher { + aws := make([]*apiWatcher, 0, len(awsMap)) + for aw := range awsMap { aws = append(aws, aw) } - uw.mu.Unlock() return aws } @@ -423,7 +486,7 @@ func (uw *urlWatcher) watchForUpdates() { if strings.Contains(apiURL, "?") { delimiter = "&" } - timeoutSeconds := time.Duration(0.9 * float64(uw.client.Timeout)).Seconds() + timeoutSeconds := time.Duration(0.9 * float64(uw.gw.client.Timeout)).Seconds() apiURL += delimiter + "watch=1&allowWatchBookmarks=true&timeoutSeconds=" + strconv.Itoa(int(timeoutSeconds)) for { resourceVersion := uw.reloadObjects() @@ -431,7 +494,7 @@ func (uw *urlWatcher) watchForUpdates() { if resourceVersion != "" { requestURL += 
"&resourceVersion=" + url.QueryEscape(resourceVersion) } - resp, err := uw.doRequest(requestURL) + resp, err := uw.gw.doRequest(requestURL) if err != nil { logger.Errorf("cannot perform request to %q: %s", requestURL, err) backoffSleep() @@ -486,9 +549,10 @@ func (uw *urlWatcher) readObjectUpdateStream(r io.Reader) error { uw.objectsUpdated.Inc() } uw.objectsByKey[key] = o + aws := getAPIWatchers(uw.aws) uw.mu.Unlock() - for _, aw := range uw.getAPIWatchers() { - labels := o.getTargetLabels(aw) + labels := o.getTargetLabels(uw.gw) + for _, aw := range aws { aw.setScrapeWorks(key, labels) } case "DELETED": @@ -503,8 +567,9 @@ func (uw *urlWatcher) readObjectUpdateStream(r io.Reader) error { uw.objectsRemoved.Inc() delete(uw.objectsByKey, key) } + aws := getAPIWatchers(uw.aws) uw.mu.Unlock() - for _, aw := range uw.getAPIWatchers() { + for _, aw := range aws { aw.removeScrapeWorks(key) } case "BOOKMARK": diff --git a/lib/promscrape/discovery/kubernetes/endpoints.go b/lib/promscrape/discovery/kubernetes/endpoints.go index 5119f513a..805a88b01 100644 --- a/lib/promscrape/discovery/kubernetes/endpoints.go +++ b/lib/promscrape/discovery/kubernetes/endpoints.go @@ -90,17 +90,17 @@ type EndpointPort struct { // getTargetLabels returns labels for each endpoint in eps. // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#endpoints -func (eps *Endpoints) getTargetLabels(aw *apiWatcher) []map[string]string { +func (eps *Endpoints) getTargetLabels(gw *groupWatcher) []map[string]string { var svc *Service - if o := aw.getObjectByRole("service", eps.Metadata.Namespace, eps.Metadata.Name); o != nil { + if o := gw.getObjectByRole("service", eps.Metadata.Namespace, eps.Metadata.Name); o != nil { svc = o.(*Service) } podPortsSeen := make(map[*Pod][]int) var ms []map[string]string for _, ess := range eps.Subsets { for _, epp := range ess.Ports { - ms = appendEndpointLabelsForAddresses(ms, aw, podPortsSeen, eps, ess.Addresses, epp, svc, "true") - ms = appendEndpointLabelsForAddresses(ms, aw, podPortsSeen, eps, ess.NotReadyAddresses, epp, svc, "false") + ms = appendEndpointLabelsForAddresses(ms, gw, podPortsSeen, eps, ess.Addresses, epp, svc, "true") + ms = appendEndpointLabelsForAddresses(ms, gw, podPortsSeen, eps, ess.NotReadyAddresses, epp, svc, "false") } } @@ -135,11 +135,11 @@ func (eps *Endpoints) getTargetLabels(aw *apiWatcher) []map[string]string { return ms } -func appendEndpointLabelsForAddresses(ms []map[string]string, aw *apiWatcher, podPortsSeen map[*Pod][]int, eps *Endpoints, +func appendEndpointLabelsForAddresses(ms []map[string]string, gw *groupWatcher, podPortsSeen map[*Pod][]int, eps *Endpoints, eas []EndpointAddress, epp EndpointPort, svc *Service, ready string) []map[string]string { for _, ea := range eas { var p *Pod - if o := aw.getObjectByRole("pod", ea.TargetRef.Namespace, ea.TargetRef.Name); o != nil { + if o := gw.getObjectByRole("pod", ea.TargetRef.Namespace, ea.TargetRef.Name); o != nil { p = o.(*Pod) } m := getEndpointLabelsForAddressAndPort(podPortsSeen, eps, ea, epp, p, svc, ready) diff --git a/lib/promscrape/discovery/kubernetes/endpointslices.go b/lib/promscrape/discovery/kubernetes/endpointslices.go index a8739ee5b..5e1961e92 100644 --- a/lib/promscrape/discovery/kubernetes/endpointslices.go +++ b/lib/promscrape/discovery/kubernetes/endpointslices.go @@ -37,16 +37,16 @@ func parseEndpointSlice(data []byte) (object, error) { // getTargetLabels returns labels for eps. 
// // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#endpointslices -func (eps *EndpointSlice) getTargetLabels(aw *apiWatcher) []map[string]string { +func (eps *EndpointSlice) getTargetLabels(gw *groupWatcher) []map[string]string { var svc *Service - if o := aw.getObjectByRole("service", eps.Metadata.Namespace, eps.Metadata.Name); o != nil { + if o := gw.getObjectByRole("service", eps.Metadata.Namespace, eps.Metadata.Name); o != nil { svc = o.(*Service) } podPortsSeen := make(map[*Pod][]int) var ms []map[string]string for _, ess := range eps.Endpoints { var p *Pod - if o := aw.getObjectByRole("pod", ess.TargetRef.Namespace, ess.TargetRef.Name); o != nil { + if o := gw.getObjectByRole("pod", ess.TargetRef.Namespace, ess.TargetRef.Name); o != nil { p = o.(*Pod) } for _, epp := range eps.Ports { diff --git a/lib/promscrape/discovery/kubernetes/ingress.go b/lib/promscrape/discovery/kubernetes/ingress.go index d7f5df66f..cca6bdb24 100644 --- a/lib/promscrape/discovery/kubernetes/ingress.go +++ b/lib/promscrape/discovery/kubernetes/ingress.go @@ -87,7 +87,7 @@ type HTTPIngressPath struct { // getTargetLabels returns labels for ig. // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ingress -func (ig *Ingress) getTargetLabels(aw *apiWatcher) []map[string]string { +func (ig *Ingress) getTargetLabels(gw *groupWatcher) []map[string]string { tlsHosts := make(map[string]bool) for _, tls := range ig.Spec.TLS { for _, host := range tls.Hosts { diff --git a/lib/promscrape/discovery/kubernetes/node.go b/lib/promscrape/discovery/kubernetes/node.go index a7e392d65..6c990c846 100644 --- a/lib/promscrape/discovery/kubernetes/node.go +++ b/lib/promscrape/discovery/kubernetes/node.go @@ -76,7 +76,7 @@ type NodeDaemonEndpoints struct { // getTargetLabels returs labels for the given n. // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#node -func (n *Node) getTargetLabels(aw *apiWatcher) []map[string]string { +func (n *Node) getTargetLabels(gw *groupWatcher) []map[string]string { addr := getNodeAddr(n.Status.Addresses) if len(addr) == 0 { // Skip node without address diff --git a/lib/promscrape/discovery/kubernetes/pod.go b/lib/promscrape/discovery/kubernetes/pod.go index c3e3f314a..8a88ffaca 100644 --- a/lib/promscrape/discovery/kubernetes/pod.go +++ b/lib/promscrape/discovery/kubernetes/pod.go @@ -97,7 +97,7 @@ type PodCondition struct { // getTargetLabels returns labels for each port of the given p. // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#pod -func (p *Pod) getTargetLabels(aw *apiWatcher) []map[string]string { +func (p *Pod) getTargetLabels(gw *groupWatcher) []map[string]string { if len(p.Status.PodIP) == 0 { // Skip pod without IP return nil diff --git a/lib/promscrape/discovery/kubernetes/service.go b/lib/promscrape/discovery/kubernetes/service.go index c5c0ddd8c..b74bd2653 100644 --- a/lib/promscrape/discovery/kubernetes/service.go +++ b/lib/promscrape/discovery/kubernetes/service.go @@ -71,7 +71,7 @@ type ServicePort struct { // getTargetLabels returns labels for each port of the given s. 
// // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#service -func (s *Service) getTargetLabels(aw *apiWatcher) []map[string]string { +func (s *Service) getTargetLabels(gw *groupWatcher) []map[string]string { host := fmt.Sprintf("%s.%s.svc", s.Metadata.Name, s.Metadata.Namespace) var ms []map[string]string for _, sp := range s.Spec.Ports { From bf15d6a6a2d8600e77ebb8f71d9d598320604d82 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sun, 14 Mar 2021 21:52:02 +0200 Subject: [PATCH 38/59] lib/promscrape: remove duplicate `target` word in error message --- lib/promscrape/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go index b637b8316..a65f651c3 100644 --- a/lib/promscrape/config.go +++ b/lib/promscrape/config.go @@ -251,7 +251,7 @@ func (cfg *Config) getKubernetesSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork { target := metaLabels["__address__"] sw, err := sc.swc.getScrapeWork(target, nil, metaLabels) if err != nil { - logger.Errorf("cannot create kubernetes_sd_config target target %q for job_name %q: %s", target, sc.swc.jobName, err) + logger.Errorf("cannot create kubernetes_sd_config target %q for job_name %q: %s", target, sc.swc.jobName, err) return nil } return sw From 83edbb7cab55f280baf641df025f1d7932d045fb Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sun, 14 Mar 2021 21:53:20 +0200 Subject: [PATCH 39/59] lib/promscrape: retry service discovery in a few seconds if it starts returning 0 targets This should reduce recovery time from temporary issues during service discovery --- lib/promscrape/scraper.go | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/lib/promscrape/scraper.go b/lib/promscrape/scraper.go index 3c2651369..c0b68edc2 100644 --- a/lib/promscrape/scraper.go +++ b/lib/promscrape/scraper.go @@ -231,11 +231,17 @@ func (scfg *scrapeConfig) run() { cfg := <-scfg.cfgCh var swsPrev []*ScrapeWork updateScrapeWork := func(cfg *Config) { - startTime := time.Now() - sws := scfg.getScrapeWork(cfg, swsPrev) - sg.update(sws) - swsPrev = sws - scfg.discoveryDuration.UpdateDuration(startTime) + for { + startTime := time.Now() + sws := scfg.getScrapeWork(cfg, swsPrev) + retry := sg.update(sws) + swsPrev = sws + scfg.discoveryDuration.UpdateDuration(startTime) + if !retry { + return + } + time.Sleep(2 * time.Second) + } } updateScrapeWork(cfg) atomic.AddInt32(&PendingScrapeConfigs, -1) @@ -295,7 +301,7 @@ func (sg *scraperGroup) stop() { sg.wg.Wait() } -func (sg *scraperGroup) update(sws []*ScrapeWork) { +func (sg *scraperGroup) update(sws []*ScrapeWork) (retry bool) { sg.mLock.Lock() defer sg.mLock.Unlock() @@ -352,6 +358,7 @@ func (sg *scraperGroup) update(sws []*ScrapeWork) { sg.changesCount.Add(additionsCount + deletionsCount) logger.Infof("%s: added targets: %d, removed targets: %d; total targets: %d", sg.name, additionsCount, deletionsCount, len(sg.m)) } + return deletionsCount > 0 && len(sg.m) == 0 } type scraper struct { From b88806ecbf408105a1834381fc2176aa62c8b618 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sun, 14 Mar 2021 21:55:00 +0200 Subject: [PATCH 40/59] lib/promscrape/discovery/kubernetes: do not start object watcher until initial objects are loaded --- lib/promscrape/discovery/kubernetes/api_watcher.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/promscrape/discovery/kubernetes/api_watcher.go b/lib/promscrape/discovery/kubernetes/api_watcher.go index 
1e09e8955..d0cc9bb6c 100644
--- a/lib/promscrape/discovery/kubernetes/api_watcher.go
+++ b/lib/promscrape/discovery/kubernetes/api_watcher.go
@@ -490,10 +490,11 @@ func (uw *urlWatcher) watchForUpdates() {
 	apiURL += delimiter + "watch=1&allowWatchBookmarks=true&timeoutSeconds=" + strconv.Itoa(int(timeoutSeconds))
 	for {
 		resourceVersion := uw.reloadObjects()
-		requestURL := apiURL
-		if resourceVersion != "" {
-			requestURL += "&resourceVersion=" + url.QueryEscape(resourceVersion)
+		if resourceVersion == "" {
+			backoffSleep()
+			continue
 		}
+		requestURL := apiURL + "&resourceVersion=" + url.QueryEscape(resourceVersion)
 		resp, err := uw.gw.doRequest(requestURL)
 		if err != nil {
 			logger.Errorf("cannot perform request to %q: %s", requestURL, err)

From 7f6f350ee19e45002cac83bbfda22d2bf6309ae4 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Sun, 14 Mar 2021 22:24:03 +0200
Subject: [PATCH 41/59] lib/promscrape: bring back the logic for flushing big
 buffers to storage from commit 3fd8653b40113de40b6dc265724104fcf2f776d9

This should reduce memory usage when vmagent scrapes targets with a big
number of metrics and `-promscrape.streamParse` isn't enabled

---
 lib/promscrape/scrapework.go | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index d19ae8b34..e6b537b68 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -294,10 +294,22 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	srcRows := wc.rows.Rows
 	samplesScraped := len(srcRows)
 	scrapedSamples.Update(float64(samplesScraped))
+	samplesPostRelabeling := 0
 	for i := range srcRows {
 		sw.addRowToTimeseries(wc, &srcRows[i], scrapeTimestamp, true)
+		if len(wc.labels) > 40000 {
+			// Limit the maximum size of wc.writeRequest.
+			// This should reduce memory usage when scraping targets with millions of metrics and/or labels.
+			// For example, when scraping /federate handler from Prometheus - see https://prometheus.io/docs/prometheus/latest/federation/
+			samplesPostRelabeling += len(wc.writeRequest.Timeseries)
+			sw.updateSeriesAdded(wc)
+			startTime := time.Now()
+			sw.PushData(&wc.writeRequest)
+			pushDataDuration.UpdateDuration(startTime)
+			wc.resetNoRows()
+		}
 	}
-	samplesPostRelabeling := len(wc.writeRequest.Timeseries)
+	samplesPostRelabeling += len(wc.writeRequest.Timeseries)
 	if sw.Config.SampleLimit > 0 && samplesPostRelabeling > sw.Config.SampleLimit {
 		wc.resetNoRows()
 		up = 0

From c14dafce43be1f8811323a13186be3d7b5a1ab70 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Sun, 14 Mar 2021 22:56:23 +0200
Subject: [PATCH 42/59] lib/promscrape: an attempt to reduce memory usage when
 vmagent scrapes targets with a varying number of metrics

Do not cache overly big byte buffers and writeRequestCtx objects, since it is
cheaper to re-create them than to waste RAM on caching them.

This reverts 7f6f350ee19e45002cac83bbfda22d2bf6309ae4

---
 lib/leveledbytebufferpool/pool.go | 14 ++++++----
 lib/promscrape/scrapework.go      | 44 ++++++++++++-------------------
 2 files changed, 26 insertions(+), 32 deletions(-)

diff --git a/lib/leveledbytebufferpool/pool.go b/lib/leveledbytebufferpool/pool.go
index 59f660211..96ba6a2d1 100644
--- a/lib/leveledbytebufferpool/pool.go
+++ b/lib/leveledbytebufferpool/pool.go
@@ -15,7 +15,9 @@ import (
 // ...
 // pools[n] is for capacities from 2^(n+2)+1 to 2^(n+3)
 //
-var pools [30]sync.Pool
+// Limit the maximum capacity to 2^18, since there are no performance benefits
+// in caching byte slices with bigger capacities.
+var pools [17]sync.Pool
 
 // Get returns byte buffer with the given capacity.
@@ -37,9 +39,11 @@ func Get(capacity int) *bytesutil.ByteBuffer {
 // Put returns bb to the pool.
 func Put(bb *bytesutil.ByteBuffer) {
 	capacity := cap(bb.B)
-	id, _ := getPoolIDAndCapacity(capacity)
-	bb.Reset()
-	pools[id].Put(bb)
+	id, poolCapacity := getPoolIDAndCapacity(capacity)
+	if capacity <= poolCapacity {
+		bb.Reset()
+		pools[id].Put(bb)
+	}
 }
 
 func getPoolIDAndCapacity(size int) (int, int) {
@@ -49,7 +53,7 @@ func getPoolIDAndCapacity(size int) (int, int) {
 	}
 	size >>= 3
 	id := bits.Len(uint(size))
-	if id > len(pools) {
+	if id >= len(pools) {
 		id = len(pools) - 1
 	}
 	return id, (1 << (id + 3))
diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index e6b537b68..b9da864d5 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -177,9 +177,9 @@ type scrapeWork struct {
 	// It is used as a hint in order to reduce memory usage for body buffers.
 	prevBodyLen int
 
-	// prevRowsLen contains the number rows scraped during the previous scrape.
+	// prevLabelsLen contains the number of labels scraped during the previous scrape.
 	// It is used as a hint in order to reduce memory usage when parsing scrape responses.
-	prevRowsLen int
+	prevLabelsLen int
 }
 
 func (sw *scrapeWork) run(stopCh <-chan struct{}) {
@@ -283,7 +283,7 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	scrapeDuration.Update(duration)
 	scrapeResponseSize.Update(float64(len(body.B)))
 	up := 1
-	wc := writeRequestCtxPool.Get(sw.prevRowsLen)
+	wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
 	if err != nil {
 		up = 0
 		scrapesFailed.Inc()
@@ -294,22 +294,10 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	srcRows := wc.rows.Rows
 	samplesScraped := len(srcRows)
 	scrapedSamples.Update(float64(samplesScraped))
-	samplesPostRelabeling := 0
 	for i := range srcRows {
 		sw.addRowToTimeseries(wc, &srcRows[i], scrapeTimestamp, true)
-		if len(wc.labels) > 40000 {
-			// Limit the maximum size of wc.writeRequest.
-			// This should reduce memory usage when scraping targets with millions of metrics and/or labels.
-			// For example, when scraping /federate handler from Prometheus - see https://prometheus.io/docs/prometheus/latest/federation/
-			samplesPostRelabeling += len(wc.writeRequest.Timeseries)
-			sw.updateSeriesAdded(wc)
-			startTime := time.Now()
-			sw.PushData(&wc.writeRequest)
-			pushDataDuration.UpdateDuration(startTime)
-			wc.resetNoRows()
-		}
 	}
-	samplesPostRelabeling += len(wc.writeRequest.Timeseries)
+	samplesPostRelabeling := len(wc.writeRequest.Timeseries)
 	if sw.Config.SampleLimit > 0 && samplesPostRelabeling > sw.Config.SampleLimit {
 		wc.resetNoRows()
 		up = 0
@@ -325,7 +313,7 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	startTime := time.Now()
 	sw.PushData(&wc.writeRequest)
 	pushDataDuration.UpdateDuration(startTime)
-	sw.prevRowsLen = samplesScraped
+	sw.prevLabelsLen = len(wc.labels)
 	wc.reset()
 	writeRequestCtxPool.Put(wc)
 	// body must be released only after wc is released, since wc refers to body.
@@ -339,7 +327,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error { samplesScraped := 0 samplesPostRelabeling := 0 responseSize := int64(0) - wc := writeRequestCtxPool.Get(sw.prevRowsLen) + wc := writeRequestCtxPool.Get(sw.prevLabelsLen) sr, err := sw.GetStreamReader() if err != nil { @@ -389,7 +377,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error { startTime := time.Now() sw.PushData(&wc.writeRequest) pushDataDuration.UpdateDuration(startTime) - sw.prevRowsLen = len(wc.rows.Rows) + sw.prevLabelsLen = len(wc.labels) wc.reset() writeRequestCtxPool.Put(wc) tsmGlobal.Update(sw.Config, sw.ScrapeGroup, up == 1, realTimestamp, int64(duration*1000), err) @@ -401,11 +389,11 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error { // // Its logic has been copied from leveledbytebufferpool. type leveledWriteRequestCtxPool struct { - pools [30]sync.Pool + pools [13]sync.Pool } -func (lwp *leveledWriteRequestCtxPool) Get(rowsCapacity int) *writeRequestCtx { - id, capacityNeeded := lwp.getPoolIDAndCapacity(rowsCapacity) +func (lwp *leveledWriteRequestCtxPool) Get(labelsCapacity int) *writeRequestCtx { + id, capacityNeeded := lwp.getPoolIDAndCapacity(labelsCapacity) for i := 0; i < 2; i++ { if id < 0 || id >= len(lwp.pools) { break @@ -421,10 +409,12 @@ func (lwp *leveledWriteRequestCtxPool) Get(rowsCapacity int) *writeRequestCtx { } func (lwp *leveledWriteRequestCtxPool) Put(wc *writeRequestCtx) { - capacity := cap(wc.rows.Rows) - id, _ := lwp.getPoolIDAndCapacity(capacity) - wc.reset() - lwp.pools[id].Put(wc) + capacity := cap(wc.labels) + id, poolCapacity := lwp.getPoolIDAndCapacity(capacity) + if capacity <= poolCapacity { + wc.reset() + lwp.pools[id].Put(wc) + } } func (lwp *leveledWriteRequestCtxPool) getPoolIDAndCapacity(size int) (int, int) { @@ -434,7 +424,7 @@ func (lwp *leveledWriteRequestCtxPool) getPoolIDAndCapacity(size int) (int, int) } size >>= 3 id := bits.Len(uint(size)) - if id > len(lwp.pools) { + if id >= len(lwp.pools) { id = len(lwp.pools) - 1 } return id, (1 << (id + 3)) From 6d91842c83ed395194474e855f08a4442533f073 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 11:50:56 +0200 Subject: [PATCH 43/59] docs/Cluster-VictoriaMetrics.md: sync with cluster README.md --- docs/Cluster-VictoriaMetrics.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Cluster-VictoriaMetrics.md b/docs/Cluster-VictoriaMetrics.md index 1fe691492..972d42938 100644 --- a/docs/Cluster-VictoriaMetrics.md +++ b/docs/Cluster-VictoriaMetrics.md @@ -373,7 +373,7 @@ for protecting from user errors such as accidental data deletion. The following steps must be performed for each `vmstorage` node for creating a backup: 1. Create an instant snapshot by navigating to `/snapshot/create` HTTP handler. It will create snapshot and return its name. -2. Archive the created snapshot from `<-storageDataPath>/snapshots/` folder using [vmbackup](https://victoriametrics.github.io/vbackup.html). +2. Archive the created snapshot from `<-storageDataPath>/snapshots/` folder using [vmbackup](https://victoriametrics.github.io/vmbackup.html). The archival process doesn't interfere with `vmstorage` work, so it may be performed at any suitable time. 3. Delete unused snapshots via `/snapshot/delete?snapshot=` or `/snapshot/delete_all` in order to free up occupied storage space. 
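The three backup steps documented above boil down to two plain HTTP calls against a `vmstorage` node plus an out-of-band `vmbackup` run. A minimal Go sketch of that flow follows; the address `localhost:8482` and the JSON field names `status`/`snapshot` in the `/snapshot/create` response are assumptions for illustration, not taken from the docs above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// snapshotResponse mirrors the assumed JSON shape returned by /snapshot/create.
type snapshotResponse struct {
	Status   string `json:"status"`
	Snapshot string `json:"snapshot"`
}

func main() {
	// Assumed vmstorage address; adjust for the actual deployment.
	base := "http://localhost:8482"

	// Step 1: create an instant snapshot and read back its name.
	resp, err := http.Get(base + "/snapshot/create")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var sr snapshotResponse
	if err := json.NewDecoder(resp.Body).Decode(&sr); err != nil {
		panic(err)
	}

	// Step 2 happens out of band: archive the
	// <-storageDataPath>/snapshots/<name> folder with vmbackup.
	fmt.Printf("snapshot %q created; archive it with vmbackup before deleting\n", sr.Snapshot)

	// Step 3: delete the snapshot once it has been archived,
	// freeing the disk space it pins.
	del, err := http.Get(base + "/snapshot/delete?snapshot=" + url.QueryEscape(sr.Snapshot))
	if err != nil {
		panic(err)
	}
	del.Body.Close()
}
```

Because the snapshot is instant and `vmbackup` reads only the frozen snapshot directory, step 2 can run at any time without interfering with ingestion on the node.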
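Returning to the `leveledbytebufferpool` change in patch 42 above: the scheme maps a capacity to a power-of-two bucket and, after the fix, refuses to cache objects whose capacity exceeds their bucket's ceiling. The sketch below mirrors `getPoolIDAndCapacity` from the diff; the initial clamping of small sizes is an assumption, since those lines fall outside the hunk context:

```go
package main

import (
	"fmt"
	"math/bits"
)

const numPools = 17 // matches the pools [17]sync.Pool array in the patch

// bucketFor maps a requested capacity to a pool index and that pool's
// maximum cached capacity: pool i holds buffers of up to 2^(i+3) bytes.
func bucketFor(size int) (id, poolCapacity int) {
	size--
	if size < 0 {
		size = 0
	}
	size >>= 3
	id = bits.Len(uint(size))
	if id >= numPools { // the `>=` is the off-by-one fixed in the patch
		id = numPools - 1
	}
	return id, 1 << (id + 3)
}

func main() {
	for _, capacity := range []int{1, 8, 9, 4096, 1 << 18, 1 << 20} {
		id, max := bucketFor(capacity)
		// Put() now caches a buffer only when capacity <= max, so oversized
		// buffers are left to the garbage collector instead of pinning RAM.
		fmt.Printf("cap=%-8d -> pool %2d (holds up to %6d bytes), cacheable=%v\n",
			capacity, id, max, capacity <= max)
	}
}
```

Running this shows why the patch helps with targets of varying sizes: a buffer that grew past the last bucket (for example the 2^20 case) is simply dropped on `Put`, so one unusually large scrape no longer keeps a huge buffer cached forever.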
From b457739f87c4bd30ccf2d51f69829b6fa2b2e336 Mon Sep 17 00:00:00 2001 From: Roman Khavronenko Date: Mon, 15 Mar 2021 10:04:24 +0000 Subject: [PATCH 44/59] Single dashboard (#1126) * dashboard: update single node dashboard * add panel `Open FDs` for file descriptors metrics; * add panel `Disk writes/reads` to show the real read/write load on storage layer; * add `process_resident_memory_bytes` metric to memory usage panel; * add stats panel to show available CPUs, memory and disk space; * rm flags panel since it didn't prove its usefulness. * alerts: add alert for reaching FDs limit --- dashboards/victoriametrics.json | 3198 ++++++++++++++++++------------- deployment/docker/alerts.yml | 10 + 2 files changed, 1861 insertions(+), 1347 deletions(-) diff --git a/dashboards/victoriametrics.json b/dashboards/victoriametrics.json index 484e972ab..6e6aa5f02 100644 --- a/dashboards/victoriametrics.json +++ b/dashboards/victoriametrics.json @@ -25,12 +25,6 @@ "name": "Singlestat", "version": "" }, - { - "type": "panel", - "id": "table-old", - "name": "Table (old)", - "version": "" - }, { "type": "panel", "id": "text", @@ -51,12 +45,12 @@ } ] }, - "description": "Overview for single node VictoriaMetrics v1.48.0 or higher", + "description": "Overview for single node VictoriaMetrics v1.55.1 or higher", "editable": true, "gnetId": 10229, "graphTooltip": 0, "id": null, - "iteration": 1613320710757, + "iteration": 1615713966732, "links": [ { "icon": "doc", @@ -64,7 +58,7 @@ "targetBlank": true, "title": "Single server Wiki", "type": "link", - "url": "https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/Single-server-VictoriaMetrics" + "url": "https://victoriametrics.github.io/" }, { "icon": "external link", @@ -110,8 +104,8 @@ "overrides": [] }, "gridPos": { - "h": 3, - "w": 8, + "h": 2, + "w": 6, "x": 0, "y": 1 }, @@ -134,98 +128,6 @@ "title": "Version", "type": "text" }, - { - "columns": [], - "datasource": "$ds", - "description": "Run VM with `-help` flag to see all the available flags with description and default values", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fontSize": "100%", - "gridPos": { - "h": 9, - "w": 16, - "x": 8, - "y": 1 - }, - "id": 4, - "links": [], - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 4, - "desc": true - }, - "styles": [ - { - "alias": "", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "decimals": 2, - "pattern": "name", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "value", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "/.*/", - "thresholds": [], - "type": "hidden", - "unit": "short" - } - ], - "targets": [ - { - "expr": "flag{job=\"$job\", instance=~\"$instance\", value!=\"secret\", value!=\"\"}", - "format": "table", - "instant": true, - "intervalFactor": 1, - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Flags", - "transform": "table", - 
"type": "table-old" - }, { "cacheTimeout": null, "colorBackground": false, @@ -253,9 +155,9 @@ }, "gridPos": { "h": 2, - "w": 8, - "x": 0, - "y": 4 + "w": 6, + "x": 6, + "y": 1 }, "id": 26, "interval": null, @@ -316,6 +218,278 @@ ], "valueName": "current" }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$ds", + "description": "The size of the free disk space left", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 2, + "w": 6, + "x": 12, + "y": 1 + }, + "id": 80, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(vm_free_disk_space_bytes{job=\"$job\", instance=~\"$instance\", path=\"/storage\"})", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Free disk space", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$ds", + "description": "Total size of available memory for VM process", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 2, + "w": 6, + "x": 18, + "y": 1 + }, + "id": 78, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(vm_available_memory_bytes{job=\"$job\", instance=~\"$instance\"})", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Available memory", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + 
"colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$ds", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "s", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 2, + "w": 6, + "x": 0, + "y": 3 + }, + "id": 8, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "vm_app_uptime_seconds{instance=\"victoriametrics:8428\", job=\"victoriametrics\"}", + "targets": [ + { + "expr": "vm_app_uptime_seconds{job=\"$job\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Uptime", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, { "cacheTimeout": null, "colorBackground": false, @@ -343,9 +517,9 @@ }, "gridPos": { "h": 2, - "w": 8, - "x": 0, - "y": 6 + "w": 6, + "x": 6, + "y": 3 }, "id": 38, "interval": null, @@ -416,13 +590,14 @@ "#d44a3a" ], "datasource": "$ds", + "description": "Total number of available CPUs for VM process", "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, - "format": "s", + "format": "short", "gauge": { "maxValue": 100, "minValue": 0, @@ -432,11 +607,11 @@ }, "gridPos": { "h": 2, - "w": 8, - "x": 0, - "y": 8 + "w": 6, + "x": 12, + "y": 3 }, - "id": 8, + "id": 77, "interval": null, "links": [], "mappingType": 1, @@ -466,23 +641,118 @@ ], "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, + "full": true, "lineColor": "rgb(31, 120, 193)", - "show": false + "show": true }, - "tableColumn": "vm_app_uptime_seconds{instance=\"victoriametrics:8428\", job=\"victoriametrics\"}", + "tableColumn": "", "targets": [ { - "expr": "vm_app_uptime_seconds{job=\"$job\", instance=\"$instance\"}", + "expr": "sum(vm_available_cpu_cores{job=\"$job\", instance=~\"$instance\"})", "format": "time_series", + "instant": false, + "interval": "", "intervalFactor": 1, + "legendFormat": "", "refId": "A" } ], "thresholds": "", "timeFrom": null, "timeShift": null, - "title": "Uptime", + "title": "Available CPU", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$ds", + "description": "Total size of allowed memory via flag `-memory.allowedPercent`", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 2, + "w": 6, + "x": 18, + "y": 3 + }, + "id": 79, + "interval": null, + "links": [], + "mappingType": 1, + 
"mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(vm_allowed_memory_bytes{job=\"$job\", instance=~\"$instance\"})", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Allowed memory", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -501,7 +771,7 @@ "h": 1, "w": 24, "x": 0, - "y": 10 + "y": 5 }, "id": 24, "panels": [], @@ -528,7 +798,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11 + "y": 6 }, "hiddenSeries": false, "id": 12, @@ -628,7 +898,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11 + "y": 6 }, "hiddenSeries": false, "id": 22, @@ -727,7 +997,7 @@ "h": 8, "w": 12, "x": 0, - "y": 19 + "y": 14 }, "hiddenSeries": false, "id": 51, @@ -829,7 +1099,7 @@ "h": 8, "w": 12, "x": 12, - "y": 19 + "y": 14 }, "hiddenSeries": false, "id": 33, @@ -939,7 +1209,7 @@ "h": 8, "w": 12, "x": 0, - "y": 27 + "y": 22 }, "hiddenSeries": false, "id": 59, @@ -1055,7 +1325,7 @@ "h": 8, "w": 12, "x": 12, - "y": 27 + "y": 22 }, "hiddenSeries": false, "id": 35, @@ -1135,1124 +1405,1238 @@ } }, { - "collapsed": false, + "collapsed": true, "datasource": "$ds", "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 35 + "y": 30 }, "id": 14, - "panels": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "How many datapoints are inserted into storage per second", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 40 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideZero": true, + "max": false, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(vm_rows_inserted_total{job=\"$job\", instance=\"$instance\"}[$__interval])) by (type) > 0", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Datapoints ingestion rate ($instance)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + 
"alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "Shows the time needed to reach the 100% of disk capacity based on the following params:\n* free disk space;\n* row ingestion rate;\n* dedup rate;\n* compression.\n\nUse this panel for capacity planning in order to estimate the time remaining for running out of the disk space.\n\n", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 40 + }, + "hiddenSeries": false, + "id": 73, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideZero": true, + "max": false, + "min": false, + "show": false, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "vm_free_disk_space_bytes{job=\"$job\", instance=\"$instance\"} / ignoring(path) ((rate(vm_rows_added_to_storage_total{job=\"$job\", instance=\"$instance\"}[1d]) - ignoring(type) rate(vm_deduplicated_samples_total{job=\"$job\", instance=\"$instance\", type=\"merge\"}[1d])) * scalar(sum(vm_data_size_bytes{job=\"$job\", instance=\"$instance\", type!=\"indexdb\"}) / sum(vm_rows{job=\"$job\", instance=\"$instance\", type!=\"indexdb\"})))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Storage full ETA ($instance)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "Shows how many datapoints are in the storage and what is average disk usage per datapoint.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 48 + }, + "hiddenSeries": false, + "id": 30, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "bytes-per-datapoint", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(vm_rows{job=\"$job\", instance=~\"$instance\", type != \"indexdb\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "total datapoints", + "refId": "A" + }, + { + "expr": 
"sum(vm_data_size_bytes{job=\"$job\", instance=~\"$instance\", type!=\"indexdb\"}) / sum(vm_rows{job=\"$job\", instance=~\"$instance\", type != \"indexdb\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "bytes-per-datapoint", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Datapoints ($instance)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "decimals": 2, + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "How many datapoints are in RAM queue waiting to be written into storage. The number of pending data points should be in the range from 0 to `2*`, since VictoriaMetrics pushes pending data to persistent storage every second.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 48 + }, + "hiddenSeries": false, + "id": 34, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "pending index entries", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "vm_pending_rows{job=\"$job\", instance=~\"$instance\", type=\"storage\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "pending datapoints", + "refId": "A" + }, + { + "expr": "vm_pending_rows{job=\"$job\", instance=~\"$instance\", type=\"indexdb\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "pending index entries", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pending datapoints ($instance)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "decimals": 3, + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "Shows amount of on-disk space occupied by data points and the remaining disk space at `-storageDataPath`", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 56 + }, + "hiddenSeries": false, + "id": 53, + "legend": { + "alignAsTable": 
true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(vm_data_size_bytes{job=\"$job\", instance=~\"$instance\", type!=\"indexdb\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Used", + "refId": "A" + }, + { + "expr": "vm_free_disk_space_bytes{job=\"$job\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Free", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk space usage - datapoints ($instance)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "Data parts of LSM tree.\nHigh number of parts could be an evidence of slow merge performance - check the resource utilization.\n* `indexdb` - inverted index\n* `storage/small` - recently added parts of data ingested into storage(hot data)\n* `storage/big` - small parts gradually merged into big parts (cold data)", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 56 + }, + "hiddenSeries": false, + "id": 36, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(vm_parts{job=\"$job\", instance=\"$instance\"}) by (type)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "LSM parts ($instance)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "Shows amount of on-disk space occupied by inverted 
index.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 64 + }, + "hiddenSeries": false, + "id": 55, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "vm_data_size_bytes{job=\"$job\", instance=~\"$instance\", type=\"indexdb\"}", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk space usage - index ($instance)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "The number of on-going merges in storage nodes. It is expected to have high numbers for `storage/small` metric.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 64 + }, + "hiddenSeries": false, + "id": 62, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(vm_active_merges{job=\"$job\", instance=\"$instance\"}) by(type)", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Active merges ($instance)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "Shows the number of bytes read/write from the storage layer.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 72 + }, + "hiddenSeries": false, + "id": 76, + "legend": { + "avg": false, + 
"current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "read", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(process_io_storage_read_bytes_total{job=\"$job\", instance=\"$instance\"}[5m]))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "read", + "refId": "A" + }, + { + "expr": "sum(rate(process_io_storage_written_bytes_total{job=\"$job\", instance=\"$instance\"}[5m]))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "write", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk writes/reads ($instance)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "The number of rows merged per second by storage nodes.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 72 + }, + "hiddenSeries": false, + "id": 64, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(vm_rows_merged_total{job=\"$job\", instance=\"$instance\"}[5m])) by(type)", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Merge speed ($instance)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "Shows how many rows were ignored on insertion due to corrupted or out of retention timestamps.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 
8, + "w": 12, + "x": 0, + "y": 80 + }, + "hiddenSeries": false, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(vm_rows_ignored_total{job=\"$job\", instance=\"$instance\"}) by (reason) > 0", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{reason}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rows ignored ($instance)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "Shows the rate of logging the messages by their level. Unexpected spike in rate is a good reason to check logs.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 80 + }, + "hiddenSeries": false, + "id": 67, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(vm_log_messages_total{job=\"$job\", instance=\"$instance\"}[5m])) by (level) ", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{level}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Logging rate ($instance)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], "title": "Storage", "type": "row" }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "How many datapoints are inserted into storage per second", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 36 - }, - "hiddenSeries": false, - 
"id": 10, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "hideZero": true, - "max": false, - "min": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(vm_rows_inserted_total{job=\"$job\", instance=\"$instance\"}[$__interval])) by (type) > 0", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Datapoints ingestion rate ($instance)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "Shows the time needed to reach the 100% of disk capacity based on the following params:\n* free disk space;\n* row ingestion rate;\n* dedup rate;\n* compression.\n\nUse this panel for capacity planning in order to estimate the time remaining for running out of the disk space.\n\n", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 36 - }, - "hiddenSeries": false, - "id": 73, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "hideZero": true, - "max": false, - "min": false, - "show": false, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "vm_free_disk_space_bytes{job=\"$job\", instance=\"$instance\"} / ignoring(path) ((rate(vm_rows_added_to_storage_total{job=\"$job\", instance=\"$instance\"}[1d]) - ignoring(type) rate(vm_deduplicated_samples_total{job=\"$job\", instance=\"$instance\", type=\"merge\"}[1d])) * scalar(sum(vm_data_size_bytes{job=\"$job\", instance=\"$instance\", type!=\"indexdb\"}) / sum(vm_rows{job=\"$job\", instance=\"$instance\", type!=\"indexdb\"})))", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Storage full ETA ($instance)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": 
"short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "Shows how many datapoints are in the storage and what is average disk usage per datapoint.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 44 - }, - "hiddenSeries": false, - "id": 30, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "bytes-per-datapoint", - "yaxis": 2 - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(vm_rows{job=\"$job\", instance=~\"$instance\", type != \"indexdb\"})", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "total datapoints", - "refId": "A" - }, - { - "expr": "sum(vm_data_size_bytes{job=\"$job\", instance=~\"$instance\", type!=\"indexdb\"}) / sum(vm_rows{job=\"$job\", instance=~\"$instance\", type != \"indexdb\"})", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "bytes-per-datapoint", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Datapoints ($instance)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "decimals": 2, - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "How many datapoints are in RAM queue waiting to be written into storage. 
The number of pending data points should be in the range from 0 to `2*`, since VictoriaMetrics pushes pending data to persistent storage every second.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 44 - }, - "hiddenSeries": false, - "id": 34, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "pending index entries", - "yaxis": 2 - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "vm_pending_rows{job=\"$job\", instance=~\"$instance\", type=\"storage\"}", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "pending datapoints", - "refId": "A" - }, - { - "expr": "vm_pending_rows{job=\"$job\", instance=~\"$instance\", type=\"indexdb\"}", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "pending index entries", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Pending datapoints ($instance)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "decimals": 3, - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "Shows amount of on-disk space occupied by data points and the remaining disk space at `-storageDataPath`", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 52 - }, - "hiddenSeries": false, - "id": 53, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(vm_data_size_bytes{job=\"$job\", instance=~\"$instance\", type!=\"indexdb\"})", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "Used", - "refId": "A" - }, - { - "expr": "vm_free_disk_space_bytes{job=\"$job\", instance=\"$instance\"}", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "Free", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Disk space usage - datapoints ($instance)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": 
null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "Data parts of LSM tree.\nHigh number of parts could be an evidence of slow merge performance - check the resource utilization.\n* `indexdb` - inverted index\n* `storage/small` - recently added parts of data ingested into storage(hot data)\n* `storage/big` - small parts gradually merged into big parts (cold data)", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 52 - }, - "hiddenSeries": false, - "id": 36, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(vm_parts{job=\"$job\", instance=\"$instance\"}) by (type)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "LSM parts ($instance)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "The number of on-going merges in storage nodes. 
It is expected to have high numbers for `storage/small` metric.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 60 - }, - "hiddenSeries": false, - "id": 62, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(vm_active_merges{job=\"$job\", instance=\"$instance\"}) by(type)", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Active merges ($instance)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 0, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "Shows amount of on-disk space occupied by inverted index.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 60 - }, - "hiddenSeries": false, - "id": 55, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "vm_data_size_bytes{job=\"$job\", instance=~\"$instance\", type=\"indexdb\"}", - "format": "time_series", - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Disk space usage - index ($instance)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "Shows how many rows were ignored on insertion due to corrupted or out of retention timestamps.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 68 - }, - "hiddenSeries": false, - "id": 
58, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(vm_rows_ignored_total{job=\"$job\", instance=\"$instance\"}) by (reason) > 0", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{reason}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Rows ignored ($instance)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "The number of rows merged per second by storage nodes.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 68 - }, - "hiddenSeries": false, - "id": 64, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(vm_rows_merged_total{job=\"$job\", instance=\"$instance\"}[5m])) by(type)", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Merge speed ($instance)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 0, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "Shows the rate of logging the messages by their level. 
Unexpected spike in rate is a good reason to check logs.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 76 - }, - "hiddenSeries": false, - "id": 67, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(vm_log_messages_total{job=\"$job\", instance=\"$instance\"}[5m])) by (level) ", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{level}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Logging rate ($instance)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, { "collapsed": true, "datasource": "$ds", @@ -2260,7 +2644,7 @@ "h": 1, "w": 24, "x": 0, - "y": 84 + "y": 31 }, "id": 71, "panels": [ @@ -2664,7 +3048,7 @@ "h": 1, "w": 24, "x": 0, - "y": 85 + "y": 32 }, "id": 46, "panels": [ @@ -2688,7 +3072,7 @@ "h": 8, "w": 12, "x": 0, - "y": 102 + "y": 103 }, "hiddenSeries": false, "id": 44, @@ -2738,6 +3122,15 @@ "intervalFactor": 1, "legendFormat": "stack inuse", "refId": "C" + }, + { + "expr": "sum(process_resident_memory_bytes{job=\"$job\", instance=\"$instance\"})", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "resident", + "refId": "D" } ], "thresholds": [], @@ -2747,7 +3140,7 @@ "title": "Memory usage ($instance)", "tooltip": { "shared": true, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -2800,7 +3193,7 @@ "h": 8, "w": 12, "x": 12, - "y": 102 + "y": 103 }, "hiddenSeries": false, "id": 57, @@ -2883,6 +3276,7 @@ "dashLength": 10, "dashes": false, "datasource": "$ds", + "description": "Panel shows the number of open file descriptors in the OS.\nReaching the limit of open files can cause various issues and must be prevented.\n\nSee how to change limits here https://medium.com/@muhammadtriwibowo/set-permanently-ulimit-n-open-files-in-ubuntu-4d61064429a", "fieldConfig": { "defaults": { "custom": {}, @@ -2896,10 +3290,10 @@ "h": 8, "w": 12, "x": 0, - "y": 110 + "y": 111 }, "hiddenSeries": false, - "id": 47, + "id": 75, "legend": { "avg": false, "current": false, @@ -2918,24 +3312,38 @@ "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "alias": "max", + "color": "#C4162A" + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "sum(go_goroutines{job=\"$job\", instance=\"$instance\"})", + "expr": "sum(process_open_fds{job=\"$job\", instance=\"$instance\"})", "format": "time_series", + "interval": "", "intervalFactor": 2, - 
"legendFormat": "gc duration", + "legendFormat": "open", "refId": "A" + }, + { + "expr": "min(process_max_fds{job=\"$job\", instance=\"$instance\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "max", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Goroutines ($instance)", + "title": "Open FDs ($instance)", "tooltip": { "shared": true, "sort": 0, @@ -2954,7 +3362,7 @@ "decimals": 0, "format": "short", "label": null, - "logBase": 1, + "logBase": 2, "max": null, "min": "0", "show": true @@ -2992,7 +3400,7 @@ "h": 8, "w": 12, "x": 12, - "y": 110 + "y": 111 }, "hiddenSeries": false, "id": 42, @@ -3087,7 +3495,201 @@ "h": 8, "w": 12, "x": 0, - "y": 118 + "y": 119 + }, + "hiddenSeries": false, + "id": 47, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(go_goroutines{job=\"$job\", instance=\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "gc duration", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Goroutines ($instance)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 119 + }, + "hiddenSeries": false, + "id": 37, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(vm_tcplistener_conns{job=\"$job\", instance=\"$instance\"})", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "connections", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "TCP connections ($instance)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$ds", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 127 }, "hiddenSeries": false, "id": 48, @@ -3184,105 +3786,7 @@ "h": 8, "w": 12, "x": 12, - "y": 118 - }, - "hiddenSeries": false, - "id": 37, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(vm_tcplistener_conns{job=\"$job\", instance=\"$instance\"})", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "connections", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "TCP connections ($instance)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$ds", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 126 + "y": 127 }, "hiddenSeries": false, "id": 49, diff --git a/deployment/docker/alerts.yml b/deployment/docker/alerts.yml index 77f72854b..a0a32cdbd 100644 --- a/deployment/docker/alerts.yml +++ b/deployment/docker/alerts.yml @@ -122,6 +122,16 @@ groups: description: "High rate of slow inserts on \"{{ $labels.instance }}\" may be a sign of resource exhaustion for the current load. It is likely more RAM is needed for optimal handling of the current number of active time series." + - alert: ProcessNearFDLimits + expr: process_open_fds / process_max_fds > 0.8 + for: 10m + labels: + severity: critical + annotations: + summary: "Number of free file descriptors is less than 20% for \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") for the last 10m" + description: "Exhausting OS file descriptors limit can cause severe degradation of the process. + Consider to increase the limit as fast as possible." + # Alerts group for vmagent assumes that Grafana dashboard # https://grafana.com/grafana/dashboards/12683 is installed. # Pls update the `dashboard` annotation according to your setup. 
From 2dae0a2c47a116c0020ae7965b43d88f0c6f0c11 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 12:35:44 +0200 Subject: [PATCH 45/59] app/vmselect: add `round_digits` query arg to `/api/v1/query` and `/api/v1/query_range` handlers for limiting the number of decimal digits after the point --- README.md | 5 ++++- app/vmselect/prometheus/prometheus.go | 14 ++++++++++++++ app/vmselect/promql/eval.go | 8 ++++++-- app/vmselect/promql/exec.go | 9 +++++++++ docs/CHANGELOG.md | 1 + docs/Single-server-VictoriaMetrics.md | 5 ++++- 6 files changed, 38 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 3ace5755d..ef1c954be 100644 --- a/README.md +++ b/README.md @@ -562,14 +562,17 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt). For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`. +VictoriaMetrics accepts `round_digits` query arg for `/api/v1/query` and `/api/v1/query_range` handlers. It can be used for rounding response values to the given number of digits after the decimal point. For example, `/api/v1/query?query=avg_over_time(temperature[1h])&round_digits=2` would round response values to up to two digits after the decimal point. + By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range. VictoriaMetrics accepts additional args for `/api/v1/labels` and `/api/v1/label/.../values` handlers. -See [this feature request](https://github.com/prometheus/prometheus/issues/6178) for details: * Any number [time series selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) via `match[]` query arg. * Optional `start` and `end` query args for limiting the time range for the selected labels or label values. +See [this feature request](https://github.com/prometheus/prometheus/issues/6178) for details. + Additionally VictoriaMetrics provides the following handlers: * `/api/v1/series/count` - returns the total number of time series in the database. 
Some notes: diff --git a/app/vmselect/prometheus/prometheus.go b/app/vmselect/prometheus/prometheus.go index 93f5f3a8c..e9e374333 100644 --- a/app/vmselect/prometheus/prometheus.go +++ b/app/vmselect/prometheus/prometheus.go @@ -1022,6 +1022,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r), Deadline: deadline, LookbackDelta: lookbackDelta, + RoundDigits: getRoundDigits(r), EnforcedTagFilters: etf, } result, err := promql.Exec(&ec, query, true) @@ -1126,6 +1127,7 @@ func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, Deadline: deadline, MayCache: mayCache, LookbackDelta: lookbackDelta, + RoundDigits: getRoundDigits(r), EnforcedTagFilters: etf, } result, err := promql.Exec(&ec, query, false) @@ -1302,6 +1304,18 @@ func getMatchesFromRequest(r *http.Request) []string { return matches } +func getRoundDigits(r *http.Request) int { + s := r.FormValue("round_digits") + if len(s) == 0 { + return 100 + } + n, err := strconv.Atoi(s) + if err != nil { + return 100 + } + return n +} + func getLatencyOffsetMilliseconds() int64 { d := latencyOffset.Milliseconds() if d <= 1000 { diff --git a/app/vmselect/promql/eval.go b/app/vmselect/promql/eval.go index c02b18284..2db1a2f43 100644 --- a/app/vmselect/promql/eval.go +++ b/app/vmselect/promql/eval.go @@ -98,11 +98,14 @@ type EvalConfig struct { // LookbackDelta is analog to `-query.lookback-delta` from Prometheus. LookbackDelta int64 - timestamps []int64 - timestampsOnce sync.Once + // How many decimal digits after the point to leave in response. + RoundDigits int // EnforcedTagFilters used for apply additional label filters to query. EnforcedTagFilters []storage.TagFilter + + timestamps []int64 + timestampsOnce sync.Once } // newEvalConfig returns new EvalConfig copy from src. @@ -114,6 +117,7 @@ func newEvalConfig(src *EvalConfig) *EvalConfig { ec.Deadline = src.Deadline ec.MayCache = src.MayCache ec.LookbackDelta = src.LookbackDelta + ec.RoundDigits = src.RoundDigits ec.EnforcedTagFilters = src.EnforcedTagFilters // do not copy src.timestamps - they must be generated again. diff --git a/app/vmselect/promql/exec.go b/app/vmselect/promql/exec.go index 151f2dbde..25e9953d9 100644 --- a/app/vmselect/promql/exec.go +++ b/app/vmselect/promql/exec.go @@ -12,6 +12,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/querystats" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/metrics" "github.com/VictoriaMetrics/metricsql" @@ -72,6 +73,14 @@ func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result, if err != nil { return nil, err } + if n := ec.RoundDigits; n < 100 { + for i := range result { + values := result[i].Values + for j, v := range values { + values[j] = decimal.RoundToDecimalDigits(v, n) + } + } + } return result, err } diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 4442b209b..3b0439047 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -12,6 +12,7 @@ * FATURE: vmagent: accept `scrape_offset` option at `scrape_config`. This option may be useful when scrapes must start at the specified offset of every scrape interval. See [these docs](https://victoriametrics.github.io/vmagent.html#troubleshooting) for details. 
* FEATURE: vmagent: support `proxy_tls_config`, `proxy_basic_auth`, `proxy_bearer_token` and `proxy_bearer_token_file` options at `scrape_config` section for configuring proxies specified via `proxy_url`. See [these docs](https://victoriametrics.github.io/vmagent.html#scraping-targets-via-a-proxy). * FEATURE: vmauth: allow using regexp paths in `url_map`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1112) for details. +* FEATURE: accept `round_digits` query arg at `/api/v1/query` and `/api/v1/query_range` handlers. This option can be set at Prometheus datasource in Grafana for limiting the number of digits after the decimal point in response values. * BUGFIX: vmagent: prevent from high CPU usage bug during failing scrapes with small `scrape_timeout` (less than a few seconds). * BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in big number of distinct scrape config jobs by sharing Kubernetes object cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113 diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index 3ace5755d..ef1c954be 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -562,14 +562,17 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt). For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`. +VictoriaMetrics accepts `round_digits` query arg for `/api/v1/query` and `/api/v1/query_range` handlers. It can be used for rounding response values to the given number of digits after the decimal point. For example, `/api/v1/query?query=avg_over_time(temperature[1h])&round_digits=2` would round response values to up to two digits after the decimal point. + By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range. VictoriaMetrics accepts additional args for `/api/v1/labels` and `/api/v1/label/.../values` handlers. -See [this feature request](https://github.com/prometheus/prometheus/issues/6178) for details: * Any number [time series selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) via `match[]` query arg. * Optional `start` and `end` query args for limiting the time range for the selected labels or label values. +See [this feature request](https://github.com/prometheus/prometheus/issues/6178) for details. + Additionally VictoriaMetrics provides the following handlers: * `/api/v1/series/count` - returns the total number of time series in the database. 
Some notes: From 3ccf7ea20c62c04211bd2db334301fbe83491e8d Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 13:31:55 +0200 Subject: [PATCH 46/59] lib/storage: tune per-day index search --- lib/storage/index_db.go | 107 ++++++---------------------------------- 1 file changed, 14 insertions(+), 93 deletions(-) diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go index fff9c0d77..296494e0a 100644 --- a/lib/storage/index_db.go +++ b/lib/storage/index_db.go @@ -2048,15 +2048,6 @@ func (is *indexSearch) getTagFilterWithMinMetricIDsCount(tfs *TagFilters, maxMet metricIDs, _, err := is.getMetricIDsForTagFilter(tf, nil, maxMetrics) if err != nil { - if err == errFallbackToMetricNameMatch { - // Skip tag filters requiring to scan for too many metrics. - kb.B = append(kb.B[:0], uselessSingleTagFilterKeyPrefix) - kb.B = encoding.MarshalUint64(kb.B, uint64(maxMetrics)) - kb.B = tf.Marshal(kb.B) - is.db.uselessTagFiltersCache.Set(kb.B, uselessTagFilterCacheValue) - uselessTagFilters++ - continue - } return nil, nil, fmt.Errorf("cannot find MetricIDs for tagFilter %s: %w", tf, err) } if metricIDs.Len() >= maxMetrics { @@ -2306,7 +2297,7 @@ func (is *indexSearch) updateMetricIDsForTagFilters(metricIDs *uint64set.Set, tf // Fast path: found metricIDs by date range. return nil } - if err != errFallbackToMetricNameMatch { + if err != errFallbackToGlobalSearch { return err } @@ -2330,12 +2321,6 @@ func (is *indexSearch) updateMetricIDsForTagFilters(metricIDs *uint64set.Set, tf continue } mIDs, err := is.intersectMetricIDsWithTagFilter(tf, minMetricIDs) - if err == errFallbackToMetricNameMatch { - // The tag filter requires too many index scans. Postpone it, - // so tag filters with lower number of index scans may be applied. - tfsPostponed = append(tfsPostponed, tf) - continue - } if err != nil { return err } @@ -2345,11 +2330,8 @@ func (is *indexSearch) updateMetricIDsForTagFilters(metricIDs *uint64set.Set, tf if len(tfsPostponed) > 0 && successfulIntersects == 0 { return is.updateMetricIDsByMetricNameMatch(metricIDs, minMetricIDs, tfsPostponed) } - for i, tf := range tfsPostponed { + for _, tf := range tfsPostponed { mIDs, err := is.intersectMetricIDsWithTagFilter(tf, minMetricIDs) - if err == errFallbackToMetricNameMatch { - return is.updateMetricIDsByMetricNameMatch(metricIDs, minMetricIDs, tfsPostponed[i:]) - } if err != nil { return err } @@ -2363,7 +2345,6 @@ const ( uselessSingleTagFilterKeyPrefix = 0 uselessMultiTagFiltersKeyPrefix = 1 uselessNegativeTagFilterKeyPrefix = 2 - uselessTagIntersectKeyPrefix = 3 ) var uselessTagFilterCacheValue = []byte("1") @@ -2377,27 +2358,20 @@ func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, filter *uint64set // Fast path for orSuffixes - seek for rows for each value from orSuffixes. loopsCount, err := is.updateMetricIDsForOrSuffixesNoFilter(tf, maxMetrics, metricIDs) if err != nil { - if err == errFallbackToMetricNameMatch { - return nil, loopsCount, err - } return nil, loopsCount, fmt.Errorf("error when searching for metricIDs for tagFilter in fast path: %w; tagFilter=%s", err, tf) } return metricIDs, loopsCount, nil } // Slow path - scan for all the rows with the given prefix. 
- maxLoopsCount := uint64(maxMetrics) * maxIndexScanLoopsPerMetric - loopsCount, err := is.getMetricIDsForTagFilterSlow(tf, filter, maxLoopsCount, metricIDs.Add) + loopsCount, err := is.getMetricIDsForTagFilterSlow(tf, filter, metricIDs.Add) if err != nil { - if err == errFallbackToMetricNameMatch { - return nil, loopsCount, err - } return nil, loopsCount, fmt.Errorf("error when searching for metricIDs for tagFilter in slow path: %w; tagFilter=%s", err, tf) } return metricIDs, loopsCount, nil } -func (is *indexSearch) getMetricIDsForTagFilterSlow(tf *tagFilter, filter *uint64set.Set, maxLoopsCount uint64, f func(metricID uint64)) (uint64, error) { +func (is *indexSearch) getMetricIDsForTagFilterSlow(tf *tagFilter, filter *uint64set.Set, f func(metricID uint64)) (uint64, error) { if len(tf.orSuffixes) > 0 { logger.Panicf("BUG: the getMetricIDsForTagFilterSlow must be called only for empty tf.orSuffixes; got %s", tf.orSuffixes) } @@ -2436,9 +2410,6 @@ func (is *indexSearch) getMetricIDsForTagFilterSlow(tf *tagFilter, filter *uint6 } mp.ParseMetricIDs() loopsCount += uint64(mp.MetricIDsLen()) - if loopsCount > maxLoopsCount { - return loopsCount, errFallbackToMetricNameMatch - } if prevMatch && string(suffix) == string(prevMatchingSuffix) { // Fast path: the same tag value found. // There is no need in checking it again with potentially @@ -2541,7 +2512,6 @@ func (is *indexSearch) updateMetricIDsForOrSuffixNoFilter(prefix []byte, maxMetr ts := &is.ts mp := &is.mp mp.Reset() - maxLoopsCount := uint64(maxMetrics) * maxIndexScanLoopsPerMetric var loopsCount uint64 loopsPaceLimiter := 0 ts.Seek(prefix) @@ -2560,9 +2530,6 @@ func (is *indexSearch) updateMetricIDsForOrSuffixNoFilter(prefix []byte, maxMetr return loopsCount, err } loopsCount += uint64(mp.MetricIDsLen()) - if loopsCount > maxLoopsCount { - return loopsCount, errFallbackToMetricNameMatch - } mp.ParseMetricIDs() metricIDs.AddMulti(mp.MetricIDs) } @@ -2581,8 +2548,6 @@ func (is *indexSearch) updateMetricIDsForOrSuffixWithFilter(prefix []byte, metri ts := &is.ts mp := &is.mp mp.Reset() - maxLoopsCount := uint64(len(sortedFilter)) * maxIndexScanLoopsPerMetric - var loopsCount uint64 loopsPaceLimiter := 0 ts.Seek(prefix) var sf []uint64 @@ -2613,10 +2578,6 @@ func (is *indexSearch) updateMetricIDsForOrSuffixWithFilter(prefix []byte, metri return nil } sf = sortedFilter - loopsCount += uint64(mp.MetricIDsLen()) - if loopsCount > maxLoopsCount { - return errFallbackToMetricNameMatch - } mp.ParseMetricIDs() for _, metricID = range mp.MetricIDs { if len(sf) == 0 { @@ -2660,7 +2621,7 @@ func binarySearchUint64(a []uint64, v uint64) uint { return i } -var errFallbackToMetricNameMatch = errors.New("fall back to updateMetricIDsByMetricNameMatch because of too many index scan loops") +var errFallbackToGlobalSearch = errors.New("fall back from per-day index search to global index search") var errMissingMetricIDsForDate = errors.New("missing metricIDs for date") @@ -2729,7 +2690,7 @@ func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set } if maxDate-minDate > maxDaysForDateMetricIDs { // Too much dates must be covered. Give up, since it may be slow. - return errFallbackToMetricNameMatch + return errFallbackToGlobalSearch } if minDate == maxDate { // Fast path - query only a single date. 
@@ -2759,14 +2720,14 @@ func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set return } if err != nil { - if err == errFallbackToMetricNameMatch { + if err == errFallbackToGlobalSearch { // The per-date search is too expensive. Probably it is faster to perform global search // using metric name match. errGlobal = err return } dateStr := time.Unix(int64(date*24*3600), 0) - errGlobal = fmt.Errorf("cannot search for metricIDs for %s: %w", dateStr, err) + errGlobal = fmt.Errorf("cannot search for metricIDs at %s: %w", dateStr, err) return } if metricIDs.Len() < maxMetrics { @@ -2799,7 +2760,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter origLoopsCount := loopsCount if loopsCount == 0 && tf.looksLikeHeavy() { // Set high loopsCount for heavy tag filters instead of spending CPU time on their execution. - loopsCount = 100e6 + loopsCount = 11e6 is.storeLoopsCountForDateFilter(date, tf, loopsCount) } if currentTime > lastQueryTimestamp+3600 { @@ -2813,7 +2774,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter // Prevent from possible thundering herd issue when potentially heavy tf is executed from multiple concurrent queries // by temporary persisting its position in the tag filters list. if origLoopsCount == 0 { - origLoopsCount = 50e6 + origLoopsCount = 9e6 } is.storeLoopsCountForDateFilter(date, tf, origLoopsCount) } @@ -2834,7 +2795,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter var tfsPostponed []*tagFilter var metricIDs *uint64set.Set tfwsRemaining := tfws[:0] - maxDateMetrics := maxMetrics * 50 + maxDateMetrics := maxMetrics * 100 for i := range tfws { tfw := tfws[i] tf := tfw.tf @@ -2845,10 +2806,6 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter m, loopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, nil, tfs.commonPrefix, maxDateMetrics) is.storeLoopsCountForDateFilter(date, tf, loopsCount) if err != nil { - if err == errFallbackToMetricNameMatch { - tfsPostponed = append(tfsPostponed, tf) - continue - } return nil, err } if m.Len() >= maxDateMetrics { @@ -2878,7 +2835,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter } if m.Len() >= maxDateMetrics { // Too many time series found for the given (date). Fall back to global search. - return nil, errFallbackToMetricNameMatch + return nil, errFallbackToGlobalSearch } metricIDs = m } @@ -2908,10 +2865,6 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter m, loopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, metricIDs, tfs.commonPrefix, maxDateMetrics) is.storeLoopsCountForDateFilter(date, tf, loopsCount) if err != nil { - if err == errFallbackToMetricNameMatch { - tfsPostponed = append(tfsPostponed, tf) - continue - } return nil, err } if m.Len() >= maxDateMetrics { @@ -3102,7 +3055,7 @@ func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64, kbPool.Put(kb) if err != nil { // Set high loopsCount for failing filter, so it is moved to the end of filter list. 
- loopsCount = 1e9 + loopsCount = 20e9 } if metricIDs.Len() >= maxMetrics { // Increase loopsCount for tag filter matching too many metrics, @@ -3210,31 +3163,6 @@ if filter.Len() == 0 { return nil, nil } - kb := &is.kb - filterLenRounded := (uint64(filter.Len()) / 1024) * 1024 - kb.B = append(kb.B[:0], uselessTagIntersectKeyPrefix) - kb.B = encoding.MarshalUint64(kb.B, filterLenRounded) - kb.B = tf.Marshal(kb.B) - if len(is.db.uselessTagFiltersCache.Get(nil, kb.B)) > 0 { - // Skip useless work, since the intersection will return - // errFallbackToMetricNameMatch for the given filter. - return nil, errFallbackToMetricNameMatch - } - metricIDs, err := is.intersectMetricIDsWithTagFilterNocache(tf, filter) - if err == nil { - return metricIDs, err - } - if err != errFallbackToMetricNameMatch { - return nil, err - } - kb.B = append(kb.B[:0], uselessTagIntersectKeyPrefix) - kb.B = encoding.MarshalUint64(kb.B, filterLenRounded) - kb.B = tf.Marshal(kb.B) - is.db.uselessTagFiltersCache.Set(kb.B, uselessTagFilterCacheValue) - return nil, errFallbackToMetricNameMatch -} - -func (is *indexSearch) intersectMetricIDsWithTagFilterNocache(tf *tagFilter, filter *uint64set.Set) (*uint64set.Set, error) { metricIDs := filter if !tf.isNegative { metricIDs = &uint64set.Set{} } @@ -3242,17 +3170,13 @@ func (is *indexSearch) intersectMetricIDsWithTagFilterNocache(tf *tagFilter, fil if len(tf.orSuffixes) > 0 { // Fast path for orSuffixes - seek for rows for each value from orSuffixes. if err := is.updateMetricIDsForOrSuffixesWithFilter(tf, metricIDs, filter); err != nil { - if err == errFallbackToMetricNameMatch { - return nil, err - } return nil, fmt.Errorf("error when intersecting metricIDs for tagFilter in fast path: %w; tagFilter=%s", err, tf) } return metricIDs, nil } // Slow path - scan for all the rows with the given prefix. - maxLoopsCount := uint64(filter.Len()) * maxIndexScanLoopsPerMetric - _, err := is.getMetricIDsForTagFilterSlow(tf, filter, maxLoopsCount, func(metricID uint64) { + _, err := is.getMetricIDsForTagFilterSlow(tf, filter, func(metricID uint64) { if tf.isNegative { // filter must be equal to metricIDs metricIDs.Del(metricID) } else { metricIDs.Add(metricID) } }) if err != nil { - if err == errFallbackToMetricNameMatch { - return nil, err - } return nil, fmt.Errorf("error when intersecting metricIDs for tagFilter in slow path: %w; tagFilter=%s", err, tf) } return metricIDs, nil From fb935e6e2cd7556e716671fb420e2519c88bd94b Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 14:58:19 +0200 Subject: [PATCH 47/59] docs/CHANGELOG.md: typo fix: FATURE -> FEATURE --- docs/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 3b0439047..7665d4aaf 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -9,7 +9,7 @@ * FEATURE: reduce median query duration by up to 2x. See https://github.com/VictoriaMetrics/VictoriaMetrics/commit/18fe0ff14bc78860c5569e2b70de1db78fac61be * FEATURE: export `vm_available_memory_bytes` and `vm_available_cpu_cores` metrics, which show the number of available RAM and available CPU cores for VictoriaMetrics apps. * FEATURE: vmagent: add ability to replicate scrape targets among `vmagent` instances in the cluster with `-promscrape.cluster.replicationFactor` command-line flag.
See [these docs](https://victoriametrics.github.io/vmagent.html#scraping-big-number-of-targets). -* FATURE: vmagent: accept `scrape_offset` option at `scrape_config`. This option may be useful when scrapes must start at the specified offset of every scrape interval. See [these docs](https://victoriametrics.github.io/vmagent.html#troubleshooting) for details. +* FEATURE: vmagent: accept `scrape_offset` option at `scrape_config`. This option may be useful when scrapes must start at the specified offset of every scrape interval. See [these docs](https://victoriametrics.github.io/vmagent.html#troubleshooting) for details. * FEATURE: vmagent: support `proxy_tls_config`, `proxy_basic_auth`, `proxy_bearer_token` and `proxy_bearer_token_file` options at `scrape_config` section for configuring proxies specified via `proxy_url`. See [these docs](https://victoriametrics.github.io/vmagent.html#scraping-targets-via-a-proxy). * FEATURE: vmauth: allow using regexp paths in `url_map`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1112) for details. * FEATURE: accept `round_digits` query arg at `/api/v1/query` and `/api/v1/query_range` handlers. This option can be set at Prometheus datasource in Grafana for limiting the number of digits after the decimal point in response values. From 43504ebd14a97b630c5160edccbad6003e566a34 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 16:57:51 +0200 Subject: [PATCH 48/59] lib/uint64set: optimize bucket16.add and bucket16.addMulti a bit --- lib/uint64set/uint64set.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/lib/uint64set/uint64set.go b/lib/uint64set/uint64set.go index d3d6bde99..2739283ee 100644 --- a/lib/uint64set/uint64set.go +++ b/lib/uint64set/uint64set.go @@ -835,19 +835,22 @@ func (b *bucket16) copyTo(dst *bucket16) { } func (b *bucket16) add(x uint16) bool { - if b.bits == nil { + bits := b.bits + if bits == nil { return b.addToSmallPool(x) } wordNum, bitMask := getWordNumBitMask(x) - word := &b.bits[wordNum] - ok := *word&bitMask == 0 - *word |= bitMask + ok := bits[wordNum]&bitMask == 0 + if ok { + bits[wordNum] |= bitMask + } return ok } func (b *bucket16) addMulti(a []uint64) int { count := 0 - if b.bits == nil { + bits := b.bits + if bits == nil { // Slow path for _, x := range a { if b.add(uint16(x)) { @@ -858,11 +861,10 @@ func (b *bucket16) addMulti(a []uint64) int { // Fast path for _, x := range a { wordNum, bitMask := getWordNumBitMask(uint16(x)) - word := &b.bits[wordNum] - if *word&bitMask == 0 { + if bits[wordNum]&bitMask == 0 { + bits[wordNum] |= bitMask count++ } - *word |= bitMask } } return count From eb103e1527ee1020b5042b16f746c31d20a8eceb Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 17:10:40 +0200 Subject: [PATCH 49/59] lib/uint64set: optimize Set.AddMulti for large sorted sets --- lib/uint64set/uint64set.go | 93 ++++++++++++++++---------------------- 1 file changed, 40 insertions(+), 53 deletions(-) diff --git a/lib/uint64set/uint64set.go b/lib/uint64set/uint64set.go index 2739283ee..21398177c 100644 --- a/lib/uint64set/uint64set.go +++ b/lib/uint64set/uint64set.go @@ -141,36 +141,32 @@ func (s *Set) AddMulti(a []uint64) { if len(a) == 0 { return } - slowPath := false - hi := uint32(a[0] >> 32) - for _, x := range a[1:] { - if hi != uint32(x>>32) { - slowPath = true - break + hiPrev := uint32(a[0] >> 32) + i := 0 + for j, x := range a { + hi := uint32(x >> 32) + if hi == hiPrev { + continue } + b32 := 
s.getOrCreateBucket32(hiPrev) + s.itemsCount += b32.addMulti(a[i:j]) + hiPrev = hi + i = j } - if slowPath { - for _, x := range a { - s.Add(x) - } - return - } - // Fast path - all the items in a have identical higher 32 bits. - // Put them in a bulk into the corresponding bucket32. + b32 := s.getOrCreateBucket32(hiPrev) + s.itemsCount += b32.addMulti(a[i:]) +} + +func (s *Set) getOrCreateBucket32(hi uint32) *bucket32 { bs := s.buckets - var b32 *bucket32 for i := range bs { if bs[i].hi == hi { - b32 = &bs[i] - break + return &bs[i] } } - if b32 == nil { - b32 = s.addBucket32() - b32.hi = hi - } - n := b32.addMulti(a) - s.itemsCount += n + b32 := s.addBucket32() + b32.hi = hi + return b32 } func (s *Set) addBucket32() *bucket32 { @@ -609,41 +605,32 @@ func (b *bucket32) addMulti(a []uint64) int { if len(a) == 0 { return 0 } - hi := uint16(a[0] >> 16) - slowPath := false - for _, x := range a[1:] { - if hi != uint16(x>>16) { - slowPath = true - break + count := 0 + hiPrev := uint16(a[0] >> 16) + i := 0 + for j, x := range a { + hi := uint16(x >> 16) + if hi == hiPrev { + continue } + b16 := b.getOrCreateBucket16(hiPrev) + count += b16.addMulti(a[i:j]) + hiPrev = hi + i = j } - if slowPath { - count := 0 - for _, x := range a { - if b.add(uint32(x)) { - count++ - } - } - return count - } - // Fast path - all the items in a have identical higher 32+16 bits. - // Put them to a single bucket16 in a bulk. - var b16 *bucket16 + b16 := b.getOrCreateBucket16(hiPrev) + count += b16.addMulti(a[i:]) + return count +} + +func (b *bucket32) getOrCreateBucket16(hi uint16) *bucket16 { his := b.b16his bs := b.buckets - if n := b.getHint(); n < uint32(len(his)) && his[n] == hi { - b16 = &bs[n] + n := binarySearch16(his, hi) + if n < 0 || n >= len(his) || his[n] != hi { + return b.addBucketAtPos(hi, n) } - if b16 == nil { - n := binarySearch16(his, hi) - if n < 0 || n >= len(his) || his[n] != hi { - b16 = b.addBucketAtPos(hi, n) - } else { - b.setHint(n) - b16 = &bs[n] - } - } - return b16.addMulti(a) + return &bs[n] } func (b *bucket32) addSlow(hi, lo uint16) bool { From fb82c4b9faa99fd8bcf843ac3e843a7fe90b7867 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 17:23:29 +0200 Subject: [PATCH 50/59] lib/uint64set: reduce the size of bucket16 by storing smallPool by pointer. This reduces CPU time spent on bucket16 copying inside bucket32.addBucketAtPos.
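The win comes from the struct layout: with the pool embedded, every `bucket16` copy moves the whole 56-element array, while a pointer field costs 8 bytes. A small sketch comparing the two layouts from the diff below (sizes assume a 64-bit platform; `wordsPerBucket` is assumed to be 1024, i.e. 2^16 bits split into 64-bit words):

```go
package main

import (
	"fmt"
	"unsafe"
)

const (
	smallPoolSize  = 56
	wordsPerBucket = 1024 // assumed: 2^16 bits / 64 bits per word
)

// oldBucket16 embeds the small pool, so every copy moves ~128 bytes.
type oldBucket16 struct {
	bits         *[wordsPerBucket]uint64
	smallPoolLen int
	smallPool    [smallPoolSize]uint16
}

// newBucket16 keeps the pool behind a pointer; copies move only 24 bytes,
// and the pool itself can be allocated lazily on first use.
type newBucket16 struct {
	bits         *[wordsPerBucket]uint64
	smallPool    *[smallPoolSize]uint16
	smallPoolLen int
}

func main() {
	fmt.Println(unsafe.Sizeof(oldBucket16{})) // 128
	fmt.Println(unsafe.Sizeof(newBucket16{})) // 24
}
```

That is why shifting `bucket16` values around inside `bucket32.addBucketAtPos` gets cheaper: each moved element is now 24 bytes instead of 128.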
--- lib/uint64set/uint64set.go | 57 ++++++++++++++++++++++++++++++-------- 1 file changed, 46 insertions(+), 11 deletions(-) diff --git a/lib/uint64set/uint64set.go b/lib/uint64set/uint64set.go index 21398177c..c4cdc0d30 100644 --- a/lib/uint64set/uint64set.go +++ b/lib/uint64set/uint64set.go @@ -729,8 +729,8 @@ const ( type bucket16 struct { bits *[wordsPerBucket]uint64 + smallPool *[smallPoolSize]uint16 smallPoolLen int - smallPool [smallPoolSize]uint16 } const smallPoolSize = 56 @@ -807,7 +807,14 @@ func (b *bucket16) intersect(a *bucket16) { } func (b *bucket16) sizeBytes() uint64 { - return uint64(unsafe.Sizeof(*b)) + uint64(unsafe.Sizeof(*b.bits)) + n := unsafe.Sizeof(*b) + if b.bits != nil { + n += unsafe.Sizeof(*b.bits) + } + if b.smallPool != nil { + n += unsafe.Sizeof(*b.smallPool) + } + return uint64(n) } func (b *bucket16) copyTo(dst *bucket16) { @@ -818,7 +825,18 @@ func (b *bucket16) copyTo(dst *bucket16) { dst.bits = &bits } dst.smallPoolLen = b.smallPoolLen - dst.smallPool = b.smallPool + if b.smallPool != nil { + sp := dst.getOrCreateSmallPool() + *sp = *b.smallPool + } +} + +func (b *bucket16) getOrCreateSmallPool() *[smallPoolSize]uint16 { + if b.smallPool == nil { + var sp [smallPoolSize]uint16 + b.smallPool = &sp + } + return b.smallPool } func (b *bucket16) add(x uint16) bool { @@ -861,15 +879,16 @@ func (b *bucket16) addToSmallPool(x uint16) bool { if b.hasInSmallPool(x) { return false } - if b.smallPoolLen < len(b.smallPool) { - b.smallPool[b.smallPoolLen] = x + sp := b.getOrCreateSmallPool() + if b.smallPoolLen < len(sp) { + sp[b.smallPoolLen] = x b.smallPoolLen++ return true } b.smallPoolLen = 0 var bits [wordsPerBucket]uint64 b.bits = &bits - for _, v := range b.smallPool[:] { + for _, v := range sp[:] { b.add(v) } b.add(x) @@ -885,7 +904,11 @@ func (b *bucket16) has(x uint16) bool { } func (b *bucket16) hasInSmallPool(x uint16) bool { - for _, v := range b.smallPool[:b.smallPoolLen] { + sp := b.smallPool + if sp == nil { + return false + } + for _, v := range sp[:b.smallPoolLen] { if v == x { return true } @@ -905,9 +928,13 @@ func (b *bucket16) del(x uint16) bool { } func (b *bucket16) delFromSmallPool(x uint16) bool { - for i, v := range b.smallPool[:b.smallPoolLen] { + sp := b.smallPool + if sp == nil { + return false + } + for i, v := range sp[:b.smallPoolLen] { if v == x { - copy(b.smallPool[i:], b.smallPool[i+1:]) + copy(sp[i:], sp[i+1:]) b.smallPoolLen-- return true } @@ -918,11 +945,15 @@ func (b *bucket16) delFromSmallPool(x uint16) bool { func (b *bucket16) appendTo(dst []uint64, hi uint32, hi16 uint16) []uint64 { hi64 := uint64(hi)<<32 | uint64(hi16)<<16 if b.bits == nil { + sp := b.smallPool + if sp == nil { + return dst + } // Use smallPoolSorter instead of sort.Slice here in order to reduce memory allocations. sps := smallPoolSorterPool.Get().(*smallPoolSorter) - // Sort a copy of b.smallPool, since b must be readonly in order to prevent from data races + // Sort a copy of sp, since b must be readonly in order to prevent from data races // when b.appendTo is called from concurrent goroutines. - sps.smallPool = b.smallPool + sps.smallPool = *sp sps.a = sps.smallPool[:b.smallPoolLen] if len(sps.a) > 1 && !sort.IsSorted(sps) { sort.Sort(sps) @@ -985,6 +1016,10 @@ func getWordNumBitMask(x uint16) (uint16, uint64) { func binarySearch16(u16 []uint16, x uint16) int { // The code has been adapted from sort.Search. n := len(u16) + if n > 0 && u16[n-1] < x { + // Fast path for values scanned in ascending order. 
+ return n + } i, j := 0, n for i < j { h := int(uint(i+j) >> 1) From 82aab874465665ee90f0e6c641aa4589ffb8421d Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 20:18:22 +0200 Subject: [PATCH 51/59] app/vmselect/promql: fix tests after 2dae0a2c47a116c0020ae7965b43d88f0c6f0c11 --- app/vmselect/promql/exec_test.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/app/vmselect/promql/exec_test.go b/app/vmselect/promql/exec_test.go index 62f46632f..2d1022d46 100644 --- a/app/vmselect/promql/exec_test.go +++ b/app/vmselect/promql/exec_test.go @@ -57,10 +57,11 @@ func TestExecSuccess(t *testing.T) { f := func(q string, resultExpected []netstorage.Result) { t.Helper() ec := &EvalConfig{ - Start: start, - End: end, - Step: step, - Deadline: searchutils.NewDeadline(time.Now(), time.Minute, ""), + Start: start, + End: end, + Step: step, + Deadline: searchutils.NewDeadline(time.Now(), time.Minute, ""), + RoundDigits: 100, } for i := 0; i < 5; i++ { result, err := Exec(ec, q, false) @@ -6575,10 +6576,11 @@ func TestExecError(t *testing.T) { f := func(q string) { t.Helper() ec := &EvalConfig{ - Start: 1000, - End: 2000, - Step: 100, - Deadline: searchutils.NewDeadline(time.Now(), time.Minute, ""), + Start: 1000, + End: 2000, + Step: 100, + Deadline: searchutils.NewDeadline(time.Now(), time.Minute, ""), + RoundDigits: 100, } for i := 0; i < 4; i++ { rv, err := Exec(ec, q, false) From 923cdb055264de587bc588811e4e2115e7972e29 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 20:29:27 +0200 Subject: [PATCH 52/59] app/vmselect/promql: reduce overhead on scrape interval estimation It should be enough to use the first 20 datapoints instead of 100 datapoints for scrape interval estimation. --- app/vmselect/promql/rollup.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/app/vmselect/promql/rollup.go b/app/vmselect/promql/rollup.go index c7f973a82..282b305aa 100644 --- a/app/vmselect/promql/rollup.go +++ b/app/vmselect/promql/rollup.go @@ -538,6 +538,7 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu // Do not drop trailing data points for queries, which return 2 or 1 point (aka instant queries). // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/845 canDropLastSample := rc.CanDropLastSample && len(rc.Timestamps) > 2 + f := rc.Func for _, tEnd := range rc.Timestamps { tStart := tEnd - window ni = seekFirstTimestampIdxAfter(timestamps[i:], tStart, ni) @@ -577,7 +578,7 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu rfa.realNextValue = nan } rfa.currTimestamp = tEnd - value := rc.Func(rfa) + value := f(rfa) rfa.idx++ dstValues = append(dstValues, value) } @@ -643,12 +644,12 @@ func getScrapeInterval(timestamps []int64) int64 { return int64(maxSilenceInterval) } - // Estimate scrape interval as 0.6 quantile for the first 100 intervals. + // Estimate scrape interval as 0.6 quantile for the first 20 intervals. 
+	// Estimate scrape interval as 0.6 quantile for the first 20 intervals.
 	h := histogram.GetFast()
 	tsPrev := timestamps[0]
 	timestamps = timestamps[1:]
-	if len(timestamps) > 100 {
-		timestamps = timestamps[:100]
+	if len(timestamps) > 20 {
+		timestamps = timestamps[:20]
 	}
 	for _, ts := range timestamps {
 		h.Update(float64(ts - tsPrev))

From 9c77e34ef978b50c78fae9f361b2e3fd56fe9857 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Mon, 15 Mar 2021 20:31:24 +0200
Subject: [PATCH 53/59] lib/storage: further tuning for time series selector code

---
 lib/storage/index_db.go | 103 ++++++++++++++++++++++++----------
 1 file changed, 62 insertions(+), 41 deletions(-)

diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go
index 296494e0a..4e8ee3411 100644
--- a/lib/storage/index_db.go
+++ b/lib/storage/index_db.go
@@ -2356,7 +2356,13 @@ func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, filter *uint64set
 	metricIDs := &uint64set.Set{}
 	if len(tf.orSuffixes) > 0 {
 		// Fast path for orSuffixes - seek for rows for each value from orSuffixes.
-		loopsCount, err := is.updateMetricIDsForOrSuffixesNoFilter(tf, maxMetrics, metricIDs)
+		var loopsCount uint64
+		var err error
+		if filter != nil {
+			loopsCount, err = is.updateMetricIDsForOrSuffixesWithFilter(tf, metricIDs, filter)
+		} else {
+			loopsCount, err = is.updateMetricIDsForOrSuffixesNoFilter(tf, maxMetrics, metricIDs)
+		}
 		if err != nil {
 			return nil, loopsCount, fmt.Errorf("error when searching for metricIDs for tagFilter in fast path: %w; tagFilter=%s", err, tf)
 		}
@@ -2493,19 +2499,22 @@ func (is *indexSearch) updateMetricIDsForOrSuffixesNoFilter(tf *tagFilter, maxMe
 	return loopsCount, nil
 }

-func (is *indexSearch) updateMetricIDsForOrSuffixesWithFilter(tf *tagFilter, metricIDs, filter *uint64set.Set) error {
+func (is *indexSearch) updateMetricIDsForOrSuffixesWithFilter(tf *tagFilter, metricIDs, filter *uint64set.Set) (uint64, error) {
 	sortedFilter := filter.AppendTo(nil)
 	kb := kbPool.Get()
 	defer kbPool.Put(kb)
+	var loopsCount uint64
 	for _, orSuffix := range tf.orSuffixes {
 		kb.B = append(kb.B[:0], tf.prefix...)
 		kb.B = append(kb.B, orSuffix...)
kb.B = append(kb.B, tagSeparatorChar) - if err := is.updateMetricIDsForOrSuffixWithFilter(kb.B, metricIDs, sortedFilter, tf.isNegative); err != nil { - return err + lc, err := is.updateMetricIDsForOrSuffixWithFilter(kb.B, metricIDs, sortedFilter, tf.isNegative) + if err != nil { + return loopsCount, err } + loopsCount += lc } - return nil + return loopsCount, nil } func (is *indexSearch) updateMetricIDsForOrSuffixNoFilter(prefix []byte, maxMetrics int, metricIDs *uint64set.Set) (uint64, error) { @@ -2539,15 +2548,16 @@ func (is *indexSearch) updateMetricIDsForOrSuffixNoFilter(prefix []byte, maxMetr return loopsCount, nil } -func (is *indexSearch) updateMetricIDsForOrSuffixWithFilter(prefix []byte, metricIDs *uint64set.Set, sortedFilter []uint64, isNegative bool) error { +func (is *indexSearch) updateMetricIDsForOrSuffixWithFilter(prefix []byte, metricIDs *uint64set.Set, sortedFilter []uint64, isNegative bool) (uint64, error) { if len(sortedFilter) == 0 { - return nil + return 0, nil } firstFilterMetricID := sortedFilter[0] lastFilterMetricID := sortedFilter[len(sortedFilter)-1] ts := &is.ts mp := &is.mp mp.Reset() + var loopsCount uint64 loopsPaceLimiter := 0 ts.Seek(prefix) var sf []uint64 @@ -2555,17 +2565,18 @@ func (is *indexSearch) updateMetricIDsForOrSuffixWithFilter(prefix []byte, metri for ts.NextItem() { if loopsPaceLimiter&paceLimiterMediumIterationsMask == 0 { if err := checkSearchDeadlineAndPace(is.deadline); err != nil { - return err + return loopsCount, err } } loopsPaceLimiter++ item := ts.Item if !bytes.HasPrefix(item, prefix) { - return nil + return loopsCount, nil } if err := mp.InitOnlyTail(item, item[len(prefix):]); err != nil { - return err + return loopsCount, err } + loopsCount += uint64(mp.MetricIDsLen()) firstMetricID, lastMetricID := mp.FirstAndLastMetricIDs() if lastMetricID < firstFilterMetricID { // Skip the item, since it contains metricIDs lower @@ -2575,10 +2586,11 @@ func (is *indexSearch) updateMetricIDsForOrSuffixWithFilter(prefix []byte, metri if firstMetricID > lastFilterMetricID { // Stop searching, since the current item and all the subsequent items // contain metricIDs higher than metricIDs in sortedFilter. - return nil + return loopsCount, nil } sf = sortedFilter mp.ParseMetricIDs() + matchingMetricIDs := mp.MetricIDs[:0] for _, metricID = range mp.MetricIDs { if len(sf) == 0 { break @@ -2593,18 +2605,23 @@ func (is *indexSearch) updateMetricIDsForOrSuffixWithFilter(prefix []byte, metri if metricID < sf[0] { continue } - if isNegative { - metricIDs.Del(metricID) - } else { - metricIDs.Add(metricID) - } + matchingMetricIDs = append(matchingMetricIDs, metricID) sf = sf[1:] } + if len(matchingMetricIDs) > 0 { + if isNegative { + for _, metricID := range matchingMetricIDs { + metricIDs.Del(metricID) + } + } else { + metricIDs.AddMulti(matchingMetricIDs) + } + } } if err := ts.Error(); err != nil { - return fmt.Errorf("error when searching for tag filter prefix %q: %w", prefix, err) + return loopsCount, fmt.Errorf("error when searching for tag filter prefix %q: %w", prefix, err) } - return nil + return loopsCount, nil } func binarySearchUint64(a []uint64, v uint64) uint { @@ -2792,10 +2809,9 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter }) // Populate metricIDs for the first non-negative filter. 
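The filter loop in `updateMetricIDsForOrSuffixWithFilter` above advances through the parsed metricIDs and `sortedFilter` in lockstep. The core of that technique, a two-pointer intersection of sorted `uint64` slices, can be sketched in isolation (simplified helper that returns the matches directly):

```
package main

import "fmt"

// intersectSorted returns the values present in both sorted slices by
// advancing two cursors in lockstep, the same idea used for matching
// parsed metricIDs against sortedFilter.
func intersectSorted(a, b []uint64) []uint64 {
	var out []uint64
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]:
			i++
		case a[i] > b[j]:
			j++
		default:
			out = append(out, a[i])
			i++
			j++
		}
	}
	return out
}

func main() {
	fmt.Println(intersectSorted([]uint64{1, 3, 5, 7}, []uint64{3, 4, 5, 9})) // [3 5]
}
```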
-	var tfsPostponed []*tagFilter
 	var metricIDs *uint64set.Set
 	tfwsRemaining := tfws[:0]
-	maxDateMetrics := maxMetrics * 100
+	maxDateMetrics := maxMetrics * 50
 	for i := range tfws {
 		tfw := tfws[i]
 		tf := tfw.tf
@@ -2804,13 +2820,16 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 			continue
 		}
 		m, loopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, nil, tfs.commonPrefix, maxDateMetrics)
-		is.storeLoopsCountForDateFilter(date, tf, loopsCount)
+		if loopsCount > tfw.loopsCount {
+			is.storeLoopsCountForDateFilter(date, tf, loopsCount)
+		}
 		if err != nil {
 			return nil, err
 		}
 		if m.Len() >= maxDateMetrics {
-			// Too many time series found by a single tag filter. Postpone applying this filter via metricName match.
-			tfsPostponed = append(tfsPostponed, tf)
+			// Too many time series found by a single tag filter. Postpone applying this filter.
+			tfwsRemaining = append(tfwsRemaining, tfw)
+			tfw.loopsCount = loopsCount
 			continue
 		}
 		metricIDs = m
@@ -2846,6 +2865,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 	// when the initial tag filters significantly reduce the number of found metricIDs,
 	// so the remaining filters could be performed via much faster metricName matching instead
 	// of slow selecting of matching metricIDs.
+	var tfsPostponed []*tagFilter
 	for i := range tfwsRemaining {
 		tfw := tfwsRemaining[i]
 		tf := tfw.tf
@@ -2854,24 +2874,26 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 			// Short circuit - there is no need in applying the remaining filters to an empty set.
 			break
 		}
-		if uint64(metricIDsLen)*maxIndexScanLoopsPerMetric < tfw.loopsCount {
+		if tfw.loopsCount > uint64(metricIDsLen)*loopsCountPerMetricNameMatch {
 			// It should be faster performing metricName match on the remaining filters
 			// instead of scanning big number of entries in the inverted index for these filters.
-			tfsPostponed = append(tfsPostponed, tf)
-			// Store stats for non-executed tf, since it could be updated during protection from thundering herd.
-			is.storeLoopsCountForDateFilter(date, tf, tfw.loopsCount)
-			continue
+			for i < len(tfwsRemaining) {
+				tfw := tfwsRemaining[i]
+				tf := tfw.tf
+				tfsPostponed = append(tfsPostponed, tf)
+				// Store stats for non-executed tf, since it could be updated during protection from thundering herd.
+				is.storeLoopsCountForDateFilter(date, tf, tfw.loopsCount)
+				i++
+			}
+			break
+		}
+		m, loopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, metricIDs, tfs.commonPrefix, 0)
+		if loopsCount > tfw.loopsCount {
+			is.storeLoopsCountForDateFilter(date, tf, loopsCount)
 		}
-		m, loopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, metricIDs, tfs.commonPrefix, maxDateMetrics)
-		is.storeLoopsCountForDateFilter(date, tf, loopsCount)
 		if err != nil {
 			return nil, err
 		}
-		if m.Len() >= maxDateMetrics {
-			// Too many time series found by a single tag filter. Postpone applying this filter via metricName match.
-			tfsPostponed = append(tfsPostponed, tf)
-			continue
-		}
 		if tf.isNegative {
 			metricIDs.Subtract(m)
 		} else {
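The rewritten loop above pivots on the new `loopsCountPerMetricNameMatch` constant: once the projected index-scan cost of a filter exceeds the estimated cost of matching metric names directly against the already-found metricIDs, that filter and all remaining filters are postponed. A sketch of that comparison (hypothetical `shouldPostpone` helper; the constant value is taken from this patch):

```
package main

import "fmt"

// loopsCountPerMetricNameMatch mirrors the constant introduced in this
// patch: the estimated number of index-scan loops one metricName match costs.
const loopsCountPerMetricNameMatch = 500

// shouldPostpone reports whether applying a tag filter via the inverted
// index is expected to be slower than matching metric names directly.
func shouldPostpone(filterLoopsCount uint64, foundMetricIDs int) bool {
	return filterLoopsCount > uint64(foundMetricIDs)*loopsCountPerMetricNameMatch
}

func main() {
	fmt.Println(shouldPostpone(10e6, 1000))  // true: 10M loops > 1000*500
	fmt.Println(shouldPostpone(100e3, 1000)) // false: 100K loops < 500K
}
```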
@@ -3057,7 +3079,7 @@ func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64,
 		// Set high loopsCount for failing filter, so it is moved to the end of filter list.
 		loopsCount = 20e9
 	}
-	if metricIDs.Len() >= maxMetrics {
+	if filter == nil && metricIDs.Len() >= maxMetrics {
 		// Increase loopsCount for tag filter matching too many metrics,
 		// So next time it is moved to the end of filter list.
 		loopsCount *= 2
@@ -3154,10 +3176,8 @@ func (is *indexSearch) updateMetricIDsForPrefix(prefix []byte, metricIDs *uint64
 	return nil
 }

-// The maximum number of index scan loops.
-// Bigger number of loops is slower than updateMetricIDsByMetricNameMatch
-// over the found metrics.
-const maxIndexScanLoopsPerMetric = 100
+// The estimated number of index scan loops a single loop in updateMetricIDsByMetricNameMatch takes.
+const loopsCountPerMetricNameMatch = 500

 func (is *indexSearch) intersectMetricIDsWithTagFilter(tf *tagFilter, filter *uint64set.Set) (*uint64set.Set, error) {
 	if filter.Len() == 0 {
@@ -3169,7 +3189,8 @@ func (is *indexSearch) intersectMetricIDsWithTagFilter(tf *tagFilter, filter *ui
 	}
 	if len(tf.orSuffixes) > 0 {
 		// Fast path for orSuffixes - seek for rows for each value from orSuffixes.
-		if err := is.updateMetricIDsForOrSuffixesWithFilter(tf, metricIDs, filter); err != nil {
+		_, err := is.updateMetricIDsForOrSuffixesWithFilter(tf, metricIDs, filter)
+		if err != nil {
 			return nil, fmt.Errorf("error when intersecting metricIDs for tagFilter in fast path: %w; tagFilter=%s", err, tf)
 		}
 		return metricIDs, nil

From b1aa8c3d8f68e9cc8f6bbcfa56bf3f6462c328db Mon Sep 17 00:00:00 2001
From: Nikolay
Date: Mon, 15 Mar 2021 22:10:47 +0300
Subject: [PATCH 54/59] add fake response for Telegraf queries (#1130)

https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1124
---
 app/vmagent/README.md |  1 +
 app/vmagent/main.go   | 13 +++++++++++--
 app/vminsert/main.go  | 13 +++++++++++--
 docs/vmagent.md       |  1 +
 4 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/app/vmagent/README.md b/app/vmagent/README.md
index 2606690c5..cf6a739b8 100644
--- a/app/vmagent/README.md
+++ b/app/vmagent/README.md
@@ -515,6 +515,7 @@ See the docs at https://victoriametrics.github.io/vmagent.html .
 -import.maxLineLen max_rows_per_line
 	The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with max_rows_per_line query arg passed to /api/v1/export
 	Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 104857600)
+ -influx.databasesNames comma-separated names of Influx databases, which will be returned for /query and /influx/query requests.
 -influx.maxLineSize value
 	The maximum size in bytes for a single Influx line during parsing
 	Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 262144)
diff --git a/app/vmagent/main.go b/app/vmagent/main.go
index 865f03459..02b65641c 100644
--- a/app/vmagent/main.go
+++ b/app/vmagent/main.go
@@ -49,6 +49,7 @@ var (
 	dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmagent. The following files are checked: "+
 		"-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . "+
 		"Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse")
+	influxDatabasesNames = flag.String("influx.databasesNames", "_internal", "Comma-separated names of databases, which will be returned for /query and /influx/query API.")
 )

 var (
@@ -205,9 +206,17 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 		return true
 	case "/query":
 		// Emulate fake response for influx query.
-		// This is required for TSBS benchmark.
+		// This is required for TSBS benchmark and some Telegraf plugins.
 		influxQueryRequests.Inc()
-		fmt.Fprintf(w, `{"results":[{"series":[{"values":[]}]}]}`)
+		var dbs string
+		influxDbs := strings.Split(*influxDatabasesNames, ",")
+		for i := range influxDbs {
+			dbs += fmt.Sprintf(`"[%s]"`, influxDbs[i])
+			if i != len(influxDbs)-1 {
+				dbs += ","
+			}
+		}
+		fmt.Fprintf(w, `{"results":[{"name":"databases","columns":["name"],"series":[{"values":[%s]}]}]}`, dbs)
 		return true
 	case "/targets":
 		promscrapeTargetsRequests.Inc()
diff --git a/app/vminsert/main.go b/app/vminsert/main.go
index 5e69efeb9..55696ec68 100644
--- a/app/vminsert/main.go
+++ b/app/vminsert/main.go
@@ -40,6 +40,7 @@ var (
 		"Usually :4242 must be set. Doesn't work if empty")
 	opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpenTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty")
 	maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped")
+	influxDatabasesNames   = flag.String("influx.databasesNames", "_internal", "Comma-separated names of databases, which will be returned for /query and /influx/query API.")
 )

 var (
@@ -148,9 +149,17 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		return true
 	case "/influx/query", "/query":
 		// Emulate fake response for influx query.
-		// This is required for TSBS benchmark.
+		// This is required for TSBS benchmark and some Telegraf plugins.
 		influxQueryRequests.Inc()
-		fmt.Fprintf(w, `{"results":[{"series":[{"values":[]}]}]}`)
+		var dbs string
+		influxDbs := strings.Split(*influxDatabasesNames, ",")
+		for i := range influxDbs {
+			dbs += fmt.Sprintf(`"[%s]"`, influxDbs[i])
+			if i != len(influxDbs)-1 {
+				dbs += ","
+			}
+		}
+		fmt.Fprintf(w, `{"results":[{"name":"databases","columns":["name"],"series":[{"values":[%s]}]}]}`, dbs)
 		return true
 	case "/prometheus/targets", "/targets":
 		promscrapeTargetsRequests.Inc()
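The handlers above build the fake `SHOW DATABASES` reply by hand. Extracted into a standalone function with an explicit database list instead of the command-line flag, the same response construction can be exercised with `httptest` (a sketch, not the eventual `lib/influxutils` code):

```
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

// writeDatabaseNames emits the same JSON shape as the handlers above:
// a fake InfluxDB SHOW DATABASES reply for TSBS and Telegraf-style clients.
func writeDatabaseNames(w http.ResponseWriter, dbNames []string) {
	dbs := make([]string, len(dbNames))
	for i, name := range dbNames {
		dbs[i] = fmt.Sprintf(`"[%s]"`, name)
	}
	fmt.Fprintf(w, `{"results":[{"name":"databases","columns":["name"],"series":[{"values":[%s]}]}]}`, strings.Join(dbs, ","))
}

func main() {
	rec := httptest.NewRecorder()
	writeDatabaseNames(rec, []string{"_internal", "telegraf"})
	fmt.Println(rec.Body.String())
}
```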
diff --git a/docs/vmagent.md b/docs/vmagent.md
index 2606690c5..cf6a739b8 100644
--- a/docs/vmagent.md
+++ b/docs/vmagent.md
@@ -515,6 +515,7 @@ See the docs at https://victoriametrics.github.io/vmagent.html .
 -import.maxLineLen max_rows_per_line
 	The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with max_rows_per_line query arg passed to /api/v1/export
 	Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 104857600)
+ -influx.databasesNames comma-separated names of Influx databases, which will be returned for /query and /influx/query requests.
 -influx.maxLineSize value
 	The maximum size in bytes for a single Influx line during parsing
 	Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 262144)

From 7c37e9aea938c9c4c9b0b1d3510165e71be1ce3b Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Mon, 15 Mar 2021 21:37:13 +0200
Subject: [PATCH 55/59] app/{vminsert,vmagent}: a follow-up for b1aa8c3d8f68e9cc8f6bbcfa56bf3f6462c328db

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1124
---
 README.md                             |   4 ++++
 app/vmagent/README.md                 |   8 ++++++--
 app/vmagent/main.go                   |  14 ++-----------
 app/vminsert/main.go                  |  14 ++-----------
 docs/CHANGELOG.md                     |   1 +
 docs/Single-server-VictoriaMetrics.md |   4 ++++
 docs/vmagent.md                       |   8 ++++++--
 lib/influxutils/influxutils.go        |  29 +++++++++++++++++++++++++++
 8 files changed, 54 insertions(+), 28 deletions(-)
 create mode 100644 lib/influxutils/influxutils.go

diff --git a/README.md b/README.md
index ef1c954be..07ff8889c 100644
--- a/README.md
+++ b/README.md
@@ -413,6 +413,10 @@ while VictoriaMetrics stores them with *milliseconds* precision.
 Extra labels may be added to all the written time series by passing `extra_label=name=value` query args.
 For example, `/write?extra_label=foo=bar` would add `{foo="bar"}` label to all the ingested metrics.

+Some plugins for Telegraf such as [fluentd](https://github.com/fangli/fluent-plugin-influxdb), [Juniper/open-nti](https://github.com/Juniper/open-nti)
+or [Juniper/jtimon](https://github.com/Juniper/jtimon) send a `SHOW DATABASES` query to `/query` and expect a particular database name in the response.
+A comma-separated list of expected databases can be passed to VictoriaMetrics via the `-influx.databaseNames` command-line flag.
+
 ## How to send data from Graphite-compatible agents such as [StatsD](https://github.com/etsy/statsd)

 Enable Graphite receiver in VictoriaMetrics by setting `-graphiteListenAddr` command line flag. For instance,
diff --git a/app/vmagent/README.md b/app/vmagent/README.md
index cf6a739b8..86fd70dd6 100644
--- a/app/vmagent/README.md
+++ b/app/vmagent/README.md
@@ -515,7 +515,9 @@ See the docs at https://victoriametrics.github.io/vmagent.html .
 -import.maxLineLen max_rows_per_line
 	The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with max_rows_per_line query arg passed to /api/v1/export
 	Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 104857600)
- -influx.databasesNames comma-separated names of Influx databases, which will be returned for /query and /influx/query requests.
+ -influx.databaseNames array
+	Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb
+	Supports array of values separated by comma or specified via multiple flags.
 -influx.maxLineSize value
 	The maximum size in bytes for a single Influx line during parsing
 	Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 262144)
@@ -574,6 +576,8 @@ See the docs at https://victoriametrics.github.io/vmagent.html .
 	The number of the member in the cluster of scrapers. It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster
 -promscrape.cluster.membersCount int
 	The number of members in a cluster of scrapers. Each member must have a unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 .
Each member then scrapes roughly 1/N of all the targets. By default cluster scraping is disabled, i.e. a single scraper scrapes all the targets
+ -promscrape.cluster.replicationFactor int
+	The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://victoriametrics.github.io/#deduplication (default 1)
 -promscrape.config string
 	Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://victoriametrics.github.io/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
 -promscrape.config.dryRun
@@ -607,7 +611,7 @@ See the docs at https://victoriametrics.github.io/vmagent.html .
 -promscrape.gceSDCheckInterval gce_sd_configs
 	Interval for checking for changes in gce. This works only if gce_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config for details (default 1m0s)
 -promscrape.kubernetes.apiServerTimeout duration
-	How frequently to reload the full state from Kubernetes API server (default 10m0s)
+	How frequently to reload the full state from Kubernetes API server (default 30m0s)
 -promscrape.kubernetesSDCheckInterval kubernetes_sd_configs
 	Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s)
 -promscrape.maxDroppedTargets droppedTargets
diff --git a/app/vmagent/main.go b/app/vmagent/main.go
index 02b65641c..2011ccffb 100644
--- a/app/vmagent/main.go
+++ b/app/vmagent/main.go
@@ -23,6 +23,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/influxutils"
 	graphiteserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/graphite"
 	influxserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/influx"
 	opentsdbserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdb"
@@ -49,7 +50,6 @@ var (
 	dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmagent. The following files are checked: "+
 		"-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . "+
 		"Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse")
-	influxDatabasesNames = flag.String("influx.databasesNames", "_internal", "Comma-separated names of databases, which will be returned for /query and /influx/query API.")
 )

 var (
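This follow-up replaces the single comma-separated `flag.String` with `flagutil.NewArray`. The standard library's `flag.Value` interface is enough to sketch such an array flag, supporting both repeated flags and comma-separated values (an illustration, not the actual `flagutil` implementation):

```
package main

import (
	"flag"
	"fmt"
	"strings"
)

// arrayFlag collects values from repeated flags and from comma-separated
// lists, mimicking the documented behavior of -influx.databaseNames.
type arrayFlag []string

func (a *arrayFlag) String() string { return strings.Join(*a, ",") }

func (a *arrayFlag) Set(v string) error {
	*a = append(*a, strings.Split(v, ",")...)
	return nil
}

func main() {
	var dbNames arrayFlag
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	fs.Var(&dbNames, "influx.databaseNames", "Comma-separated list of database names")
	fs.Parse([]string{"-influx.databaseNames", "db1,db2", "-influx.databaseNames", "db3"})
	fmt.Println(dbNames) // [db1 db2 db3]
}
```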
@@ -205,18 +205,8 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 		w.WriteHeader(http.StatusNoContent)
 		return true
 	case "/query":
-		// Emulate fake response for influx query.
-		// This is required for TSBS benchmark and some Telegraf plugins.
 		influxQueryRequests.Inc()
-		var dbs string
-		influxDbs := strings.Split(*influxDatabasesNames, ",")
-		for i := range influxDbs {
-			dbs += fmt.Sprintf(`"[%s]"`, influxDbs[i])
-			if i != len(influxDbs)-1 {
-				dbs += ","
-			}
-		}
-		fmt.Fprintf(w, `{"results":[{"name":"databases","columns":["name"],"series":[{"values":[%s]}]}]}`, dbs)
+		influxutils.WriteDatabaseNames(w)
 		return true
 	case "/targets":
 		promscrapeTargetsRequests.Inc()
diff --git a/app/vminsert/main.go b/app/vminsert/main.go
index 55696ec68..0a3449ca7 100644
--- a/app/vminsert/main.go
+++ b/app/vminsert/main.go
@@ -19,6 +19,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/vmimport"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/influxutils"
 	graphiteserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/graphite"
 	influxserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/influx"
 	opentsdbserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdb"
@@ -40,7 +41,6 @@ var (
 		"Usually :4242 must be set. Doesn't work if empty")
 	opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpenTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty")
 	maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped")
-	influxDatabasesNames   = flag.String("influx.databasesNames", "_internal", "Comma-separated names of databases, which will be returned for /query and /influx/query API.")
 )

 var (
@@ -148,18 +148,8 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		w.WriteHeader(http.StatusNoContent)
 		return true
 	case "/influx/query", "/query":
-		// Emulate fake response for influx query.
-		// This is required for TSBS benchmark and some Telegraf plugins.
 		influxQueryRequests.Inc()
-		var dbs string
-		influxDbs := strings.Split(*influxDatabasesNames, ",")
-		for i := range influxDbs {
-			dbs += fmt.Sprintf(`"[%s]"`, influxDbs[i])
-			if i != len(influxDbs)-1 {
-				dbs += ","
-			}
-		}
-		fmt.Fprintf(w, `{"results":[{"name":"databases","columns":["name"],"series":[{"values":[%s]}]}]}`, dbs)
+		influxutils.WriteDatabaseNames(w)
 		return true
 	case "/prometheus/targets", "/targets":
 		promscrapeTargetsRequests.Inc()
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 7665d4aaf..26411e4e8 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -13,6 +13,7 @@
 * FEATURE: vmagent: support `proxy_tls_config`, `proxy_basic_auth`, `proxy_bearer_token` and `proxy_bearer_token_file` options at `scrape_config` section for configuring proxies specified via `proxy_url`. See [these docs](https://victoriametrics.github.io/vmagent.html#scraping-targets-via-a-proxy).
 * FEATURE: vmauth: allow using regexp paths in `url_map`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1112) for details.
 * FEATURE: accept `round_digits` query arg at `/api/v1/query` and `/api/v1/query_range` handlers. This option can be set at Prometheus datasource in Grafana for limiting the number of digits after the decimal point in response values.
+* FEATURE: add `-influx.databaseNames` command-line flag, which can be used for accepting data from some Telegraf plugins such as [fluentd plugin](https://github.com/fangli/fluent-plugin-influxdb). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1124).
 * BUGFIX: vmagent: prevent high CPU usage during failing scrapes with small `scrape_timeout` (less than a few seconds).
 * BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in big number of distinct scrape config jobs by sharing Kubernetes object cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index ef1c954be..07ff8889c 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -413,6 +413,10 @@ while VictoriaMetrics stores them with *milliseconds* precision.
 Extra labels may be added to all the written time series by passing `extra_label=name=value` query args.
 For example, `/write?extra_label=foo=bar` would add `{foo="bar"}` label to all the ingested metrics.

+Some plugins for Telegraf such as [fluentd](https://github.com/fangli/fluent-plugin-influxdb), [Juniper/open-nti](https://github.com/Juniper/open-nti)
+or [Juniper/jtimon](https://github.com/Juniper/jtimon) send a `SHOW DATABASES` query to `/query` and expect a particular database name in the response.
+A comma-separated list of expected databases can be passed to VictoriaMetrics via the `-influx.databaseNames` command-line flag.
+
 ## How to send data from Graphite-compatible agents such as [StatsD](https://github.com/etsy/statsd)

 Enable Graphite receiver in VictoriaMetrics by setting `-graphiteListenAddr` command line flag. For instance,
diff --git a/docs/vmagent.md b/docs/vmagent.md
index cf6a739b8..86fd70dd6 100644
--- a/docs/vmagent.md
+++ b/docs/vmagent.md
@@ -515,7 +515,9 @@ See the docs at https://victoriametrics.github.io/vmagent.html .
 -import.maxLineLen max_rows_per_line
 	The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with max_rows_per_line query arg passed to /api/v1/export
 	Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 104857600)
- -influx.databasesNames comma-separated names of Influx databases, which will be returned for /query and /influx/query requests.
+ -influx.databaseNames array
+	Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb
+	Supports array of values separated by comma or specified via multiple flags.
 -influx.maxLineSize value
 	The maximum size in bytes for a single Influx line during parsing
 	Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 262144)
@@ -574,6 +576,8 @@ See the docs at https://victoriametrics.github.io/vmagent.html .
 	The number of the member in the cluster of scrapers. It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster
 -promscrape.cluster.membersCount int
 	The number of members in a cluster of scrapers. Each member must have a unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . Each member then scrapes roughly 1/N of all the targets. By default cluster scraping is disabled, i.e. a single scraper scrapes all the targets
+ -promscrape.cluster.replicationFactor int
+	The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://victoriametrics.github.io/#deduplication (default 1)
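The new `-promscrape.cluster.replicationFactor` flag documented above implies that each target is scraped by `replicationFactor` members out of `membersCount`. One plausible hash-based assignment looks like this (a guess for illustration only; the actual vmagent sharding logic may differ):

```
package main

import (
	"fmt"
	"hash/fnv"
)

// scrapesTarget reports whether cluster member memberNum should scrape the
// given target, assuming targets are spread by hash across membersCount
// members and duplicated replicationFactor times.
func scrapesTarget(target string, memberNum, membersCount, replicationFactor int) bool {
	h := fnv.New32a()
	h.Write([]byte(target))
	slot := int(h.Sum32() % uint32(membersCount))
	for r := 0; r < replicationFactor; r++ {
		if (slot+r)%membersCount == memberNum {
			return true
		}
	}
	return false
}

func main() {
	n := 0
	for member := 0; member < 3; member++ {
		if scrapesTarget("node-exporter:9100", member, 3, 2) {
			n++
		}
	}
	fmt.Println(n) // 2: exactly replicationFactor members scrape the target
}
```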
 -promscrape.config string
 	Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://victoriametrics.github.io/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details
 -promscrape.config.dryRun
@@ -607,7 +611,7 @@ See the docs at https://victoriametrics.github.io/vmagent.html .
 -promscrape.gceSDCheckInterval gce_sd_configs
 	Interval for checking for changes in gce. This works only if gce_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config for details (default 1m0s)
 -promscrape.kubernetes.apiServerTimeout duration
-	How frequently to reload the full state from Kubernetes API server (default 10m0s)
+	How frequently to reload the full state from Kubernetes API server (default 30m0s)
 -promscrape.kubernetesSDCheckInterval kubernetes_sd_configs
 	Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s)
 -promscrape.maxDroppedTargets droppedTargets
diff --git a/lib/influxutils/influxutils.go b/lib/influxutils/influxutils.go
new file mode 100644
index 000000000..5a648b9e5
--- /dev/null
+++ b/lib/influxutils/influxutils.go
@@ -0,0 +1,29 @@
+package influxutils
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
+)
+
+var influxDatabaseNames = flagutil.NewArray("influx.databaseNames", "Comma-separated list of database names to return from /query and /influx/query API. "+
+	"This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb")
+
+// WriteDatabaseNames writes influxDatabaseNames to w.
+func WriteDatabaseNames(w http.ResponseWriter) {
+	// Emulate fake response for influx query.
+	// This is required for TSBS benchmark and some Telegraf plugins.
+ // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1124 + w.Header().Set("Content-Type", "application/json; charset=utf-8") + dbNames := *influxDatabaseNames + if len(dbNames) == 0 { + dbNames = []string{"_internal"} + } + dbs := make([]string, len(dbNames)) + for i := range dbNames { + dbs[i] = fmt.Sprintf(`"[%s]"`, dbNames[i]) + } + fmt.Fprintf(w, `{"results":[{"name":"databases","columns":["name"],"series":[{"values":[%s]}]}]}`, strings.Join(dbs, ",")) +} From 85a95bf60c7ab9c1aba350f5ce25f494cd3140f5 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 21:59:25 +0200 Subject: [PATCH 56/59] all: various fixes in command-line flag descriptions --- README.md | 248 ++++++++++++++++++++++- app/vmagent/README.md | 66 +++--- app/vmagent/main.go | 2 +- app/vmalert/README.md | 4 +- app/vmauth/README.md | 4 +- app/vmbackup/README.md | 8 +- app/vminsert/main.go | 2 +- app/vmrestore/README.md | 8 +- app/vmselect/querystats/querystats.go | 4 +- docs/Single-server-VictoriaMetrics.md | 248 ++++++++++++++++++++++- docs/vmagent.md | 66 +++--- docs/vmalert.md | 4 +- docs/vmauth.md | 4 +- docs/vmbackup.md | 8 +- docs/vmrestore.md | 8 +- lib/flagutil/bytes.go | 2 +- lib/promscrape/client.go | 4 +- lib/promscrape/discovery/consul/watch.go | 2 +- lib/promscrape/scraper.go | 16 +- lib/promscrape/targetstatus.go | 2 +- lib/protoparser/vmimport/streamparser.go | 2 +- 21 files changed, 602 insertions(+), 110 deletions(-) diff --git a/README.md b/README.md index 07ff8889c..c68e8d76d 100644 --- a/README.md +++ b/README.md @@ -170,6 +170,7 @@ Alphabetically sorted links to case studies: * [Font used](#font-used) * [Color Palette](#color-palette) * [We kindly ask](#we-kindly-ask) +* [List of command-line flags](#list-of-command-line-flags) ## How to start VictoriaMetrics @@ -182,7 +183,7 @@ The following command-line flags are used the most: * `-storageDataPath` - path to data directory. VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory. * `-retentionPeriod` - retention for stored data. Older data is automatically deleted. Default retention is 1 month. See [these docs](#retention) for more details. -Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see all the available flags with description and default values. +Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags). See how to [ingest data to VictoriaMetrics](#how-to-import-time-series-data), how to [query VictoriaMetrics](#grafana-setup) and how to [handle alerts](#alerting). @@ -1545,3 +1546,248 @@ Files included in each folder: * There should be sufficient clear space around the logo. * Do not change spacing, alignment, or relative locations of the design elements. * Do not change the proportions of any of the design elements or the design itself. You may resize as needed but must retain all proportions. + + +## List of command-line flags + +Pass `-help` to VictoriaMetrics in order to see the list of supported command-line flags with their description: + +``` + -bigMergeConcurrency int + The maximum number of CPU cores to use for big merges. Default value is used if set to 0 + -csvTrimTimestamp duration + Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 
1s) may be used for reducing disk space usage for timestamp data (default 1ms)
+  -dedup.minScrapeInterval duration
+    	Remove superfluous samples from time series if they are located closer to each other than this duration. This may be useful for reducing overhead when multiple identically configured Prometheus instances write data to the same VictoriaMetrics. Deduplication is disabled if the -dedup.minScrapeInterval is 0
+  -deleteAuthKey string
+    	authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries
+  -denyQueriesOutsideRetention
+    	Whether to deny queries outside of the configured -retentionPeriod. When set, then /api/v1/query_range would return '503 Service Unavailable' error for queries with 'from' value outside -retentionPeriod. This may be useful when multiple data sources with distinct retentions are hidden behind query-tee
+  -dryRun
+    	Whether to check only -promscrape.config and then exit. Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse
+  -enableTCP6
+    	Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP is used
+  -envflag.enable
+    	Whether to enable reading flags from environment variables additionally to command line. Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set
+  -envflag.prefix string
+    	Prefix for environment variables if -envflag.enable is set
+  -finalMergeDelay duration
+    	The delay before starting final merge for per-month partition after no new data is ingested into it. Final merge may require additional disk IO and CPU resources. Final merge may increase query speed and reduce disk space usage in some cases. Zero value disables final merge
+  -forceFlushAuthKey string
+    	authKey, which must be passed in query string to /internal/force_flush pages
+  -forceMergeAuthKey string
+    	authKey, which must be passed in query string to /internal/force_merge pages
+  -fs.disableMmap
+    	Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
+  -graphiteListenAddr string
+    	TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty
+  -graphiteTrimTimestamp duration
+    	Trim timestamps for Graphite data to this duration. Minimum practical duration is 1s. Higher duration (i.e. 1m) may be used for reducing disk space usage for timestamp data (default 1s)
+  -http.connTimeout duration
+    	Incoming http connections are closed after the configured timeout. This may help spreading incoming load among a cluster of services behind load balancer. Note that the real timeout may be bigger by up to 10% as a protection from Thundering herd problem (default 2m0s)
+  -http.disableResponseCompression
+    	Disable compression of HTTP responses for saving CPU resources. By default compression is enabled to save network bandwidth
+  -http.idleConnTimeout duration
+    	Timeout for incoming idle http connections (default 1m0s)
+  -http.maxGracefulShutdownDuration duration
+    	The maximum duration for graceful shutdown of HTTP server. Highly loaded server may require increased value for graceful shutdown (default 7s)
+  -http.pathPrefix string
+    	An optional prefix to add to all the paths handled by http server.
For example, if '-http.pathPrefix=/foo/bar' is set, then all the http requests will be handled on '/foo/bar/*' paths. This may be useful for proxied requests. See https://www.robustperception.io/using-external-urls-and-proxies-with-prometheus
+  -http.shutdownDelay duration
+    	Optional delay before http server shutdown. During this delay the server returns non-OK responses from /health page, so load balancers can route new requests to other servers
+  -httpAuth.password string
+    	Password for HTTP Basic Auth. The authentication is disabled if -httpAuth.username is empty
+  -httpAuth.username string
+    	Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password
+  -httpListenAddr string
+    	TCP address to listen for http connections (default ":8428")
+  -import.maxLineLen size
+    	The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with 'max_rows_per_line' query arg passed to /api/v1/export
+    	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 104857600)
+  -influx.databaseNames array
+    	Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb
+    	Supports array of values separated by comma or specified via multiple flags.
+  -influx.maxLineSize size
+    	The maximum size in bytes for a single Influx line during parsing
+    	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144)
+  -influxListenAddr string
+    	TCP and UDP address to listen for Influx line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://:8428/write
+  -influxMeasurementFieldSeparator string
+    	Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol (default "_")
+  -influxSkipMeasurement
+    	Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator'
+  -influxSkipSingleField
+    	Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if Influx line contains only a single field
+  -influxTrimTimestamp duration
+    	Trim timestamps for Influx line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
+  -insert.maxQueueDuration duration
+    	The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
+  -loggerDisableTimestamps
+    	Whether to disable writing timestamps in logs
+  -loggerErrorsPerSecondLimit int
+    	Per-second limit on the number of ERROR messages. If more than the given number of errors are emitted per second, then the remaining errors are suppressed. Zero value disables the rate limit
+  -loggerFormat string
+    	Format for logs. Possible values: default, json (default "default")
+  -loggerLevel string
+    	Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "INFO")
+  -loggerOutput string
+    	Output for the logs. Supported values: stderr, stdout (default "stderr")
+  -loggerTimezone string
+    	Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC")
+  -loggerWarnsPerSecondLimit int
+    	Per-second limit on the number of WARN messages.
If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit
+  -maxConcurrentInserts int
+    	The maximum number of concurrent inserts. Default value should work for most cases, since it minimizes the overhead for concurrent inserts. This option is tightly coupled with -insert.maxQueueDuration (default 16)
+  -maxInsertRequestSize size
+    	The maximum size in bytes of a single Prometheus remote_write API request
+    	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
+  -maxLabelsPerTimeseries int
+    	The maximum number of labels accepted per time series. Superfluous labels are dropped (default 30)
+  -memory.allowedBytes size
+    	Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage
+    	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
+  -memory.allowedPercent float
+    	Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60)
+  -metricsAuthKey string
+    	Auth key for /metrics. It overrides httpAuth settings
+  -opentsdbHTTPListenAddr string
+    	TCP address to listen for OpenTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty
+  -opentsdbListenAddr string
+    	TCP and UDP address to listen for OpenTSDB metrics. Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. Usually :4242 must be set. Doesn't work if empty
+  -opentsdbTrimTimestamp duration
+    	Trim timestamps for OpenTSDB 'telnet put' data to this duration. Minimum practical duration is 1s. Higher duration (i.e. 1m) may be used for reducing disk space usage for timestamp data (default 1s)
+  -opentsdbhttp.maxInsertRequestSize size
+    	The maximum size of OpenTSDB HTTP put request
+    	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432)
+  -opentsdbhttpTrimTimestamp duration
+    	Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
+  -pprofAuthKey string
+    	Auth key for /debug/pprof. It overrides httpAuth settings
+  -precisionBits int
+    	The number of precision bits to store per each value. Lower precision bits improves data compression at the cost of precision loss (default 64)
+  -promscrape.cluster.memberNum int
+    	The number of the member in the cluster of scrapers. It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster
+  -promscrape.cluster.membersCount int
+    	The number of members in a cluster of scrapers. Each member must have a unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . Each member then scrapes roughly 1/N of all the targets. By default cluster scraping is disabled, i.e. a single scraper scrapes all the targets
+  -promscrape.cluster.replicationFactor int
+    	The number of members in the cluster, which scrape the same targets.
If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://victoriametrics.github.io/#deduplication (default 1) + -promscrape.config string + Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://victoriametrics.github.io/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details + -promscrape.config.dryRun + Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output. + -promscrape.config.strictParse + Whether to allow only supported fields in -promscrape.config . By default unsupported fields are silently skipped + -promscrape.configCheckInterval duration + Interval for checking for changes in '-promscrape.config' file. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes + -promscrape.consulSDCheckInterval duration + Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s) + -promscrape.disableCompression + Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control + -promscrape.disableKeepAlive + Whether to disable HTTP keep-alive connections when scraping all the targets. This may be useful when targets has no support for HTTP keep-alive connection. It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets + -promscrape.discovery.concurrency int + The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100) + -promscrape.discovery.concurrentWaitTime duration + The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s) + -promscrape.dnsSDCheckInterval duration + Interval for checking for changes in dns. This works only if dns_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config for details (default 30s) + -promscrape.dockerswarmSDCheckInterval duration + Interval for checking for changes in dockerswarm. This works only if dockerswarm_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config for details (default 30s) + -promscrape.dropOriginalLabels + Whether to drop original labels for scrape targets at /targets and /api/v1/targets pages. This may be needed for reducing memory usage when original labels for big number of scrape targets occupy big amounts of memory. 
Note that this reduces debuggability for improper per-target relabeling configs
+  -promscrape.ec2SDCheckInterval duration
+    	Interval for checking for changes in ec2. This works only if ec2_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config for details (default 1m0s)
+  -promscrape.eurekaSDCheckInterval duration
+    	Interval for checking for changes in eureka. This works only if eureka_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config for details (default 30s)
+  -promscrape.fileSDCheckInterval duration
+    	Interval for checking for changes in 'file_sd_config'. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config for details (default 30s)
+  -promscrape.gceSDCheckInterval duration
+    	Interval for checking for changes in gce. This works only if gce_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config for details (default 1m0s)
+  -promscrape.kubernetes.apiServerTimeout duration
+    	How frequently to reload the full state from Kubernetes API server (default 30m0s)
+  -promscrape.kubernetesSDCheckInterval duration
+    	Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s)
+  -promscrape.maxDroppedTargets int
+    	The maximum number of droppedTargets shown at /api/v1/targets page. Increase this value if your setup drops more scrape targets during relabeling and you need to investigate labels for all the dropped targets. Note that the increased number of tracked dropped targets may result in increased memory usage (default 1000)
+  -promscrape.maxScrapeSize size
+    	The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
+    	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
+  -promscrape.openstackSDCheckInterval duration
+    	Interval for checking for changes in openstack API server. This works only if openstack_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config for details (default 30s)
+  -promscrape.streamParse
+    	Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control
+  -promscrape.suppressDuplicateScrapeTargetErrors
+    	Whether to suppress 'duplicate scrape target' errors; see https://victoriametrics.github.io/vmagent.html#troubleshooting for details
+  -promscrape.suppressScrapeErrors
+    	Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
+  -relabelConfig string
+    	Optional path to a file with relabeling rules, which are applied to all the ingested metrics.
See https://victoriametrics.github.io/#relabeling for details
+  -retentionPeriod value
+    	Data with timestamps outside the retentionPeriod is automatically deleted
+    	The following optional suffixes are supported: h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 1)
+  -search.cacheTimestampOffset duration
+    	The maximum duration since the current time for response data, which is always queried from the original raw data, without using the response cache. Increase this value if you see gaps in responses due to time synchronization issues between VictoriaMetrics and data sources (default 5m0s)
+  -search.disableCache
+    	Whether to disable response caching. This may be useful during data backfilling
+  -search.latencyOffset duration
+    	The time when data points become visible in query results after the collection. Too small value can result in incomplete last points for query results (default 30s)
+  -search.logSlowQueryDuration duration
+    	Log queries with execution time exceeding this value. Zero disables slow query logging (default 5s)
+  -search.maxConcurrentRequests int
+    	The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores. See also -search.maxQueueDuration (default 8)
+  -search.maxExportDuration duration
+    	The maximum duration for /api/v1/export call (default 720h0m0s)
+  -search.maxLookback duration
+    	Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
+  -search.maxPointsPerTimeseries int
+    	The maximum points per a single timeseries returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as Grafana. There is no sense in setting this limit to values significantly exceeding horizontal resolution of the graph (default 30000)
+  -search.maxQueryDuration duration
+    	The maximum duration for query execution (default 30s)
+  -search.maxQueryLen size
+    	The maximum search query length in bytes
+    	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16384)
+  -search.maxQueueDuration duration
+    	The maximum time the request waits for execution when -search.maxConcurrentRequests limit is reached; see also -search.maxQueryDuration (default 10s)
+  -search.maxStalenessInterval duration
+    	The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.maxLookback' flag, which has the same meaning due to historical reasons
+  -search.maxStepForPointsAdjustment duration
+    	The maximum step when /api/v1/query_range handler adjusts points with timestamps closer than -search.latencyOffset to the current time.
The adjustment is needed because such points may contain incomplete data (default 1m0s) + -search.maxTagKeys int + The maximum number of tag keys returned from /api/v1/labels (default 100000) + -search.maxTagValueSuffixesPerSearch int + The maximum number of tag value suffixes returned from /metrics/find (default 100000) + -search.maxTagValues int + The maximum number of tag values returned from /api/v1/label//values (default 100000) + -search.maxUniqueTimeseries int + The maximum number of unique time series each search can scan (default 300000) + -search.minStalenessInterval duration + The minimum interval for staleness calculations. This flag could be useful for removing gaps on graphs generated from time series with irregular intervals between samples. See also '-search.maxStalenessInterval' + -search.queryStats.lastQueriesCount int + Query stats for /api/v1/status/top_queries is tracked on this number of last queries. Zero value disables query stats tracking (default 20000) + -search.queryStats.minQueryDuration int + The minimum duration for queries to track in query stats at /api/v1/status/top_queries. Queries with lower duration are ignored in query stats + -search.resetCacheAuthKey string + Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call + -search.treatDotsAsIsInRegexps + Whether to treat dots as is in regexp label filters used in queries. For example, foo{bar=~"a.b.c"} will be automatically converted to foo{bar=~"a\\.b\\.c"}, i.e. all the dots in regexp filters will be automatically escaped in order to match only dot char instead of matching any char. Dots in ".+", ".*" and ".{n}" regexps aren't escaped. This option is DEPRECATED in favor of {__graphite__="a.*.c"} syntax for selecting metrics matching the given Graphite metrics filter + -selfScrapeInstance string + Value for 'instance' label, which is added to self-scraped metrics (default "self") + -selfScrapeInterval duration + Interval for self-scraping own metrics at /metrics page + -selfScrapeJob string + Value for 'job' label, which is added to self-scraped metrics (default "victoria-metrics") + -smallMergeConcurrency int + The maximum number of CPU cores to use for small merges. Default value is used if set to 0 + -snapshotAuthKey string + authKey, which must be passed in query string to /snapshot* pages + -storageDataPath string + Path to storage data (default "victoria-metrics-data") + -tls + Whether to enable TLS (aka HTTPS) for incoming requests. -tlsCertFile and -tlsKeyFile must be set if -tls is set + -tlsCertFile string + Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs, since RSA certs are slow + -tlsKeyFile string + Path to file with TLS key. Used only if -tls is set + -version + Show VictoriaMetrics version +``` diff --git a/app/vmagent/README.md b/app/vmagent/README.md index 86fd70dd6..f1625ada8 100644 --- a/app/vmagent/README.md +++ b/app/vmagent/README.md @@ -512,16 +512,16 @@ See the docs at https://victoriametrics.github.io/vmagent.html . Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password -httpListenAddr string TCP address to listen for http connections. Set this flag to empty value in order to disable listening on any port. This mode may be useful for running multiple vmagent instances on the same server. 
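Many of the size flags above accept both decimal (KB, MB, GB) and binary (KiB, MiB, GiB) suffixes. A simplified parser for such values might look like this (a sketch, not the actual `lib/flagutil` code):

```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseBytes converts strings such as "64MiB" or "100MB" into byte counts.
func parseBytes(s string) (int64, error) {
	multipliers := []struct {
		suffix string
		mul    int64
	}{
		{"KiB", 1 << 10}, {"MiB", 1 << 20}, {"GiB", 1 << 30},
		{"KB", 1e3}, {"MB", 1e6}, {"GB", 1e9},
	}
	for _, m := range multipliers {
		if strings.HasSuffix(s, m.suffix) {
			n, err := strconv.ParseFloat(strings.TrimSuffix(s, m.suffix), 64)
			if err != nil {
				return 0, err
			}
			return int64(n * float64(m.mul)), nil
		}
	}
	return strconv.ParseInt(s, 10, 64)
}

func main() {
	for _, s := range []string{"64MiB", "100MB", "262144"} {
		n, err := parseBytes(s)
		fmt.Println(s, n, err)
	}
}
```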
diff --git a/app/vmagent/README.md index 86fd70dd6..f1625ada8 100644 --- a/app/vmagent/README.md +++ b/app/vmagent/README.md @@ -512,16 +512,16 @@ See the docs at https://victoriametrics.github.io/vmagent.html . Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password -httpListenAddr string TCP address to listen for http connections. Set this flag to empty value in order to disable listening on any port. This mode may be useful for running multiple vmagent instances on the same server. Note that /targets and /metrics pages aren't available if -httpListenAddr='' (default ":8429") - -import.maxLineLen max_rows_per_line - The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with max_rows_per_line query arg passed to /api/v1/export - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 104857600) + -import.maxLineLen size + The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with 'max_rows_per_line' query arg passed to /api/v1/export + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 104857600) -influx.databaseNames array Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb Supports array of values separated by comma or specified via multiple flags. - -influx.maxLineSize value + -influx.maxLineSize size The maximum size in bytes for a single Influx line during parsing - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 262144) - -influxListenAddr http://:8429/write + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144) + -influxListenAddr string TCP and UDP address to listen for Influx line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://:8429/write -influxMeasurementFieldSeparator string Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol (default "_") @@ -549,12 +549,12 @@ See the docs at https://victoriametrics.github.io/vmagent.html . Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit -maxConcurrentInserts int The maximum number of concurrent inserts. Default value should work for most cases, since it minimizes the overhead for concurrent inserts. This option is tigthly coupled with -insert.maxQueueDuration (default 16) - -maxInsertRequestSize value + -maxInsertRequestSize size The maximum size in bytes of a single Prometheus remote_write API request - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 33554432) - -memory.allowedBytes value + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432) + -memory.allowedBytes size Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -memory.allowedPercent float Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage.
Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) -metricsAuthKey string @@ -565,9 +565,9 @@ See the docs at https://victoriametrics.github.io/vmagent.html . TCP and UDP address to listen for OpentTSDB metrics. Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. Usually :4242 must be set. Doesn't work if empty -opentsdbTrimTimestamp duration Trim timestamps for OpenTSDB 'telnet put' data to this duration. Minimum practical duration is 1s. Higher duration (i.e. 1m) may be used for reducing disk space usage for timestamp data (default 1s) - -opentsdbhttp.maxInsertRequestSize value + -opentsdbhttp.maxInsertRequestSize size The maximum size of OpenTSDB HTTP put request - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 33554432) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432) -opentsdbhttpTrimTimestamp duration Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms) -pprofAuthKey string @@ -586,45 +586,45 @@ See the docs at https://victoriametrics.github.io/vmagent.html . Whether to allow only supported fields in -promscrape.config . By default unsupported fields are silently skipped -promscrape.configCheckInterval duration Interval for checking for changes in '-promscrape.config' file. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes - -promscrape.consulSDCheckInterval consul_sd_configs + -promscrape.consulSDCheckInterval duration Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s) -promscrape.disableCompression Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control - -promscrape.disableKeepAlive disable_keepalive: true - Whether to disable HTTP keep-alive connections when scraping all the targets. This may be useful when targets has no support for HTTP keep-alive connection. It is possible to set disable_keepalive: true individually per each 'scrape_config` section in '-promscrape.config' for fine grained control. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets + -promscrape.disableKeepAlive + Whether to disable HTTP keep-alive connections when scraping all the targets. This may be useful when targets have no support for HTTP keep-alive connections. It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets -promscrape.discovery.concurrency int The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.)
(default 100) -promscrape.discovery.concurrentWaitTime duration The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s) - -promscrape.dnsSDCheckInterval dns_sd_configs + -promscrape.dnsSDCheckInterval duration Interval for checking for changes in dns. This works only if dns_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config for details (default 30s) - -promscrape.dockerswarmSDCheckInterval dockerswarm_sd_configs + -promscrape.dockerswarmSDCheckInterval duration Interval for checking for changes in dockerswarm. This works only if dockerswarm_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config for details (default 30s) -promscrape.dropOriginalLabels Whether to drop original labels for scrape targets at /targets and /api/v1/targets pages. This may be needed for reducing memory usage when original labels for big number of scrape targets occupy big amounts of memory. Note that this reduces debuggability for improper per-target relabeling configs - -promscrape.ec2SDCheckInterval ec2_sd_configs + -promscrape.ec2SDCheckInterval duration Interval for checking for changes in ec2. This works only if ec2_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config for details (default 1m0s) - -promscrape.eurekaSDCheckInterval eureka_sd_configs + -promscrape.eurekaSDCheckInterval duration Interval for checking for changes in eureka. This works only if eureka_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config for details (default 30s) -promscrape.fileSDCheckInterval duration Interval for checking for changes in 'file_sd_config'. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config for details (default 30s) - -promscrape.gceSDCheckInterval gce_sd_configs + -promscrape.gceSDCheckInterval duration Interval for checking for changes in gce. This works only if gce_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config for details (default 1m0s) -promscrape.kubernetes.apiServerTimeout duration How frequently to reload the full state from Kuberntes API server (default 30m0s) - -promscrape.kubernetesSDCheckInterval kubernetes_sd_configs + -promscrape.kubernetesSDCheckInterval duration Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s) - -promscrape.maxDroppedTargets droppedTargets + -promscrape.maxDroppedTargets int The maximum number of droppedTargets shown at /api/v1/targets page. Increase this value if your setup drops more scrape targets during relabeling and you need investigating labels for all the dropped targets. Note that the increased number of tracked dropped targets may result in increased memory usage (default 1000) - -promscrape.maxScrapeSize value + -promscrape.maxScrapeSize size The maximum size of scrape response in bytes to process from Prometheus targets. 
Bigger responses are rejected - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 16777216) - -promscrape.openstackSDCheckInterval openstack_sd_configs + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216) + -promscrape.openstackSDCheckInterval duration Interval for checking for changes in openstack API server. This works only if openstack_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config for details (default 30s) - -promscrape.streamParse stream_parse: true - Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is posible to set stream_parse: true individually per each `scrape_config` section in `-promscrape.config` for fine grained control - -promscrape.suppressDuplicateScrapeTargetErrors duplicate scrape target - Whether to suppress duplicate scrape target errors; see https://victoriametrics.github.io/vmagent.html#troubleshooting for details + -promscrape.streamParse + Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control + -promscrape.suppressDuplicateScrapeTargetErrors + Whether to suppress 'duplicate scrape target' errors; see https://victoriametrics.github.io/vmagent.html#troubleshooting for details -promscrape.suppressScrapeErrors Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed -remoteWrite.basicAuth.password array @@ -641,12 +641,12 @@ See the docs at https://victoriametrics.github.io/vmagent.html . -remoteWrite.label array Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. Pass multiple -remoteWrite.label flags in order to add multiple flags to metrics before sending them to remote storage Supports array of values separated by comma or specified via multiple flags. - -remoteWrite.maxBlockSize value + -remoteWrite.maxBlockSize size The maximum size in bytes of unpacked request to send to remote storage. It shouldn't exceed -maxInsertRequestSize from VictoriaMetrics - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 8388608) - -remoteWrite.maxDiskUsagePerURL value + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 8388608) + -remoteWrite.maxDiskUsagePerURL size The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500000000. Disk usage is unlimited if the value is set to 0 - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -remoteWrite.proxyURL array Optional proxy URL for writing data to -remoteWrite.url. Supported proxies: http, https, socks5.
Example: -remoteWrite.proxyURL=socks5://proxy:1234 Supports array of values separated by comma or specified via multiple flags. diff --git a/app/vmagent/main.go index 2011ccffb..7d7635a10 100644 --- a/app/vmagent/main.go +++ b/app/vmagent/main.go @@ -41,7 +41,7 @@ var ( "Set this flag to empty value in order to disable listening on any port. This mode may be useful for running multiple vmagent instances on the same server. "+ "Note that /targets and /metrics pages aren't available if -httpListenAddr=''") influxListenAddr = flag.String("influxListenAddr", "", "TCP and UDP address to listen for Influx line protocol data. Usually :8189 must be set. Doesn't work if empty. "+ - "This flag isn't needed when ingesting data over HTTP - just send it to `http://:8429/write`") + "This flag isn't needed when ingesting data over HTTP - just send it to http://:8429/write") graphiteListenAddr = flag.String("graphiteListenAddr", "", "TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty") opentsdbListenAddr = flag.String("opentsdbListenAddr", "", "TCP and UDP address to listen for OpentTSDB metrics. "+ "Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. "+
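Editorial note on the hunk above: the reason the old -help output (and the docs generated from it) showed odd value names such as "-influxListenAddr http://:8429/write" or "-promscrape.dnsSDCheckInterval dns_sd_configs" is a documented behavior of Go's flag package: a back-quoted token inside a usage string is taken as the flag's value name in -help output, replacing the inferred type. Stripping the backticks, as this patch does throughout, restores the real value names (string, duration, size). A minimal demonstration:

```go
package main

import "flag"

func main() {
	// Go's flag package treats a back-quoted token in a usage string as
	// the value name to display in -help output. Backticks that read
	// naturally in markdown therefore leaked into -help as bogus types.
	flag.String("withBackquotes", "", "works only if `dns_sd_configs` is configured")
	flag.String("withoutBackquotes", "", "works only if dns_sd_configs is configured")
	flag.PrintDefaults()
	// Prints roughly:
	//   -withBackquotes dns_sd_configs
	//         works only if dns_sd_configs is configured
	//   -withoutBackquotes string
	//         works only if dns_sd_configs is configured
}
```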
diff --git a/app/vmalert/README.md index 5a286c289..691ab86a0 100644 --- a/app/vmalert/README.md +++ b/app/vmalert/README.md @@ -272,9 +272,9 @@ The shortlist of configuration flags is the following: Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") -loggerWarnsPerSecondLimit int Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit - -memory.allowedBytes value + -memory.allowedBytes size Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -memory.allowedPercent float Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) -metricsAuthKey string diff --git a/app/vmauth/README.md index 133718787..4baa2b9e8 100644 --- a/app/vmauth/README.md +++ b/app/vmauth/README.md @@ -208,9 +208,9 @@ See the docs at https://victoriametrics.github.io/vmauth.html . Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") -loggerWarnsPerSecondLimit int Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit - -memory.allowedBytes value + -memory.allowedBytes size Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -memory.allowedPercent float Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) -metricsAuthKey string diff --git a/app/vmbackup/README.md index a26357b89..d471847a7 100644 --- a/app/vmbackup/README.md +++ b/app/vmbackup/README.md @@ -205,12 +205,12 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time- Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") -loggerWarnsPerSecondLimit int Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit - -maxBytesPerSecond value + -maxBytesPerSecond size The maximum upload speed. There is no limit if it is set to 0 - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) - -memory.allowedBytes value + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) + -memory.allowedBytes size Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -memory.allowedPercent float Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) -origin string
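Editorial note: vmbackup's -maxBytesPerSecond above caps the average upload rate. As intuition for what such a cap does, here is a hedged sketch of a rate-limited io.Writer; the limitedWriter type and its fields are invented for this example and are not vmbackup's actual limiter:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"time"
)

// limitedWriter throttles writes so the average rate stays under
// maxPerSec bytes per second; maxPerSec == 0 means "no limit",
// matching the flag's documented behavior.
type limitedWriter struct {
	w         io.Writer
	maxPerSec int
	written   int
	start     time.Time
}

func (lw *limitedWriter) Write(p []byte) (int, error) {
	if lw.maxPerSec > 0 {
		lw.written += len(p)
		// Sleep until the average rate falls back under the cap.
		deadline := lw.start.Add(time.Duration(lw.written) * time.Second / time.Duration(lw.maxPerSec))
		time.Sleep(time.Until(deadline))
	}
	return lw.w.Write(p)
}

func main() {
	lw := &limitedWriter{w: os.Stdout, maxPerSec: 10, start: time.Now()}
	for i := 0; i < 3; i++ {
		fmt.Fprintln(lw, "chunk") // 6 bytes per write, throttled to ~10 B/s
	}
}
```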
"+ "Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. "+ "Usually :4242 must be set. Doesn't work if empty") diff --git a/app/vmrestore/README.md b/app/vmrestore/README.md index 67127d5be..3fa55905d 100644 --- a/app/vmrestore/README.md +++ b/app/vmrestore/README.md @@ -105,12 +105,12 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") -loggerWarnsPerSecondLimit int Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit - -maxBytesPerSecond value + -maxBytesPerSecond size The maximum download speed. There is no limit if it is set to 0 - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) - -memory.allowedBytes value + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) + -memory.allowedBytes size Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -memory.allowedPercent float Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) -skipBackupCompleteCheck diff --git a/app/vmselect/querystats/querystats.go b/app/vmselect/querystats/querystats.go index 389211ae9..b2f7f41ec 100644 --- a/app/vmselect/querystats/querystats.go +++ b/app/vmselect/querystats/querystats.go @@ -12,9 +12,9 @@ import ( ) var ( - lastQueriesCount = flag.Int("search.queryStats.lastQueriesCount", 20000, "Query stats for `/api/v1/status/top_queries` is tracked on this number of last queries. "+ + lastQueriesCount = flag.Int("search.queryStats.lastQueriesCount", 20000, "Query stats for /api/v1/status/top_queries is tracked on this number of last queries. "+ "Zero value disables query stats tracking") - minQueryDuration = flag.Duration("search.queryStats.minQueryDuration", 0, "The minimum duration for queries to track in query stats at `/api/v1/status/top_queries`. "+ + minQueryDuration = flag.Duration("search.queryStats.minQueryDuration", 0, "The minimum duration for queries to track in query stats at /api/v1/status/top_queries. 
"+ "Queries with lower duration are ignored in query stats") ) diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index 07ff8889c..c68e8d76d 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -170,6 +170,7 @@ Alphabetically sorted links to case studies: * [Font used](#font-used) * [Color Palette](#color-palette) * [We kindly ask](#we-kindly-ask) +* [List of command-line flags](#list-of-command-line-flags) ## How to start VictoriaMetrics @@ -182,7 +183,7 @@ The following command-line flags are used the most: * `-storageDataPath` - path to data directory. VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory. * `-retentionPeriod` - retention for stored data. Older data is automatically deleted. Default retention is 1 month. See [these docs](#retention) for more details. -Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see all the available flags with description and default values. +Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags). See how to [ingest data to VictoriaMetrics](#how-to-import-time-series-data), how to [query VictoriaMetrics](#grafana-setup) and how to [handle alerts](#alerting). @@ -1545,3 +1546,248 @@ Files included in each folder: * There should be sufficient clear space around the logo. * Do not change spacing, alignment, or relative locations of the design elements. * Do not change the proportions of any of the design elements or the design itself. You may resize as needed but must retain all proportions. + + +## List of command-line flags + +Pass `-help` to VictoriaMetrics in order to see the list of supported command-line flags with their description: + +``` + -bigMergeConcurrency int + The maximum number of CPU cores to use for big merges. Default value is used if set to 0 + -csvTrimTimestamp duration + Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms) + -dedup.minScrapeInterval duration + Remove superflouos samples from time series if they are located closer to each other than this duration. This may be useful for reducing overhead when multiple identically configured Prometheus instances write data to the same VictoriaMetrics. Deduplication is disabled if the -dedup.minScrapeInterval is 0 + -deleteAuthKey string + authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries + -denyQueriesOutsideRetention + Whether to deny queries outside of the configured -retentionPeriod. When set, then /api/v1/query_range would return '503 Service Unavailable' error for queries with 'from' value outside -retentionPeriod. This may be useful when multiple data sources with distinct retentions are hidden behind query-tee + -dryRun + Whether to check only -promscrape.config and then exit. Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse + -enableTCP6 + Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP is used + -envflag.enable + Whether to enable reading flags from environment variables additionally to command line. 
Command line flag values have priority over values from environment vars. Flags are read only from command line if this flag isn't set + -envflag.prefix string + Prefix for environment variables if -envflag.enable is set + -finalMergeDelay duration + The delay before starting final merge for per-month partition after no new data is ingested into it. Final merge may require additional disk IO and CPU resources. Final merge may increase query speed and reduce disk space usage in some cases. Zero value disables final merge + -forceFlushAuthKey string + authKey, which must be passed in query string to /internal/force_flush pages + -forceMergeAuthKey string + authKey, which must be passed in query string to /internal/force_merge pages + -fs.disableMmap + Whether to use pread() instead of mmap() for reading data files. By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread() + -graphiteListenAddr string + TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty + -graphiteTrimTimestamp duration + Trim timestamps for Graphite data to this duration. Minimum practical duration is 1s. Higher duration (i.e. 1m) may be used for reducing disk space usage for timestamp data (default 1s) + -http.connTimeout duration + Incoming http connections are closed after the configured timeout. This may help spread incoming load among a cluster of services behind a load balancer. Note that the real timeout may be bigger by up to 10% as a protection from the thundering herd problem (default 2m0s) + -http.disableResponseCompression + Disable compression of HTTP responses for saving CPU resources. By default compression is enabled to save network bandwidth + -http.idleConnTimeout duration + Timeout for incoming idle http connections (default 1m0s) + -http.maxGracefulShutdownDuration duration + The maximum duration for graceful shutdown of HTTP server. Highly loaded server may require increased value for graceful shutdown (default 7s) + -http.pathPrefix string + An optional prefix to add to all the paths handled by http server. For example, if '-http.pathPrefix=/foo/bar' is set, then all the http requests will be handled on '/foo/bar/*' paths. This may be useful for proxied requests. See https://www.robustperception.io/using-external-urls-and-proxies-with-prometheus + -http.shutdownDelay duration + Optional delay before http server shutdown. During this delay the server returns non-OK responses from /health page, so load balancers can route new requests to other servers + -httpAuth.password string + Password for HTTP Basic Auth. The authentication is disabled if -httpAuth.username is empty + -httpAuth.username string + Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password + -httpListenAddr string + TCP address to listen for http connections (default ":8428") + -import.maxLineLen size + The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with 'max_rows_per_line' query arg passed to /api/v1/export + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 104857600) + -influx.databaseNames array + Comma-separated list of database names to return from /query and /influx/query API.
This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb + Supports array of values separated by comma or specified via multiple flags. + -influx.maxLineSize size + The maximum size in bytes for a single Influx line during parsing + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144) + -influxListenAddr string + TCP and UDP address to listen for Influx line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://:8428/write + -influxMeasurementFieldSeparator string + Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol (default "_") + -influxSkipMeasurement + Uses '{field_name}' as a metric name while ignoring '{measurement}' and '-influxMeasurementFieldSeparator' + -influxSkipSingleField + Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if Influx line contains only a single field + -influxTrimTimestamp duration + Trim timestamps for Influx line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms) + -insert.maxQueueDuration duration + The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s) + -loggerDisableTimestamps + Whether to disable writing timestamps in logs + -loggerErrorsPerSecondLimit int + Per-second limit on the number of ERROR messages. If more than the given number of errors are emitted per second, then the remaining errors are suppressed. Zero value disables the rate limit + -loggerFormat string + Format for logs. Possible values: default, json (default "default") + -loggerLevel string + Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "INFO") + -loggerOutput string + Output for the logs. Supported values: stderr, stdout (default "stderr") + -loggerTimezone string + Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") + -loggerWarnsPerSecondLimit int + Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit + -maxConcurrentInserts int + The maximum number of concurrent inserts. Default value should work for most cases, since it minimizes the overhead for concurrent inserts. This option is tightly coupled with -insert.maxQueueDuration (default 16) + -maxInsertRequestSize size + The maximum size in bytes of a single Prometheus remote_write API request + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432) + -maxLabelsPerTimeseries int + The maximum number of labels accepted per time series. Superfluous labels are dropped (default 30) + -memory.allowedBytes size + Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage.
Too high value may evict too much data from OS page cache, which will result in higher disk IO usage + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) + -memory.allowedPercent float + Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) + -metricsAuthKey string + Auth key for /metrics. It overrides httpAuth settings + -opentsdbHTTPListenAddr string + TCP address to listen for OpenTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty + -opentsdbListenAddr string + TCP and UDP address to listen for OpenTSDB metrics. Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. Usually :4242 must be set. Doesn't work if empty + -opentsdbTrimTimestamp duration + Trim timestamps for OpenTSDB 'telnet put' data to this duration. Minimum practical duration is 1s. Higher duration (i.e. 1m) may be used for reducing disk space usage for timestamp data (default 1s) + -opentsdbhttp.maxInsertRequestSize size + The maximum size of OpenTSDB HTTP put request + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432) + -opentsdbhttpTrimTimestamp duration + Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms) + -pprofAuthKey string + Auth key for /debug/pprof. It overrides httpAuth settings + -precisionBits int + The number of precision bits to store per each value. Lower precision bits improve data compression at the cost of precision loss (default 64) + -promscrape.cluster.memberNum int + The member number in the cluster of scrapers. It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster + -promscrape.cluster.membersCount int + The number of members in a cluster of scrapers. Each member must have a unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . Each member then scrapes roughly 1/N of all the targets. By default cluster scraping is disabled, i.e. a single scraper scrapes all the targets + -promscrape.cluster.replicationFactor int + The number of members in the cluster, which scrape the same targets. If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://victoriametrics.github.io/#deduplication (default 1) + -promscrape.config string + Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. See https://victoriametrics.github.io/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details + -promscrape.config.dryRun + Checks -promscrape.config file for errors and unsupported fields and then exits. Returns non-zero exit code on parsing errors and emits these errors to stderr. See also -promscrape.config.strictParse command-line flag. Pass -loggerLevel=ERROR if you don't need to see info messages in the output. + -promscrape.config.strictParse + Whether to allow only supported fields in -promscrape.config . By default unsupported fields are silently skipped + -promscrape.configCheckInterval duration + Interval for checking for changes in '-promscrape.config' file.
By default the checking is disabled. Send SIGHUP signal in order to force config check for changes + -promscrape.consulSDCheckInterval duration + Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s) + -promscrape.disableCompression + Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control + -promscrape.disableKeepAlive + Whether to disable HTTP keep-alive connections when scraping all the targets. This may be useful when targets have no support for HTTP keep-alive connections. It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets + -promscrape.discovery.concurrency int + The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100) + -promscrape.discovery.concurrentWaitTime duration + The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s) + -promscrape.dnsSDCheckInterval duration + Interval for checking for changes in dns. This works only if dns_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config for details (default 30s) + -promscrape.dockerswarmSDCheckInterval duration + Interval for checking for changes in dockerswarm. This works only if dockerswarm_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config for details (default 30s) + -promscrape.dropOriginalLabels + Whether to drop original labels for scrape targets at /targets and /api/v1/targets pages. This may be needed for reducing memory usage when original labels for big number of scrape targets occupy big amounts of memory. Note that this reduces debuggability for improper per-target relabeling configs + -promscrape.ec2SDCheckInterval duration + Interval for checking for changes in ec2. This works only if ec2_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config for details (default 1m0s) + -promscrape.eurekaSDCheckInterval duration + Interval for checking for changes in eureka. This works only if eureka_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config for details (default 30s) + -promscrape.fileSDCheckInterval duration + Interval for checking for changes in 'file_sd_config'. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config for details (default 30s) + -promscrape.gceSDCheckInterval duration + Interval for checking for changes in gce. This works only if gce_sd_configs is configured in '-promscrape.config' file.
See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config for details (default 1m0s) + -promscrape.kubernetes.apiServerTimeout duration + How frequently to reload the full state from Kubernetes API server (default 30m0s) + -promscrape.kubernetesSDCheckInterval duration + Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s) + -promscrape.maxDroppedTargets int + The maximum number of droppedTargets shown at /api/v1/targets page. Increase this value if your setup drops more scrape targets during relabeling and you need to investigate labels for all the dropped targets. Note that the increased number of tracked dropped targets may result in increased memory usage (default 1000) + -promscrape.maxScrapeSize size + The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216) + -promscrape.openstackSDCheckInterval duration + Interval for checking for changes in openstack API server. This works only if openstack_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config for details (default 30s) + -promscrape.streamParse + Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control + -promscrape.suppressDuplicateScrapeTargetErrors + Whether to suppress 'duplicate scrape target' errors; see https://victoriametrics.github.io/vmagent.html#troubleshooting for details + -promscrape.suppressScrapeErrors + Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed + -relabelConfig string + Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://victoriametrics.github.io/#relabeling for details + -retentionPeriod value + Data with timestamps outside the retentionPeriod is automatically deleted + The following optional suffixes are supported: h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 1) + -search.cacheTimestampOffset duration + The maximum duration since the current time for response data, which is always queried from the original raw data, without using the response cache. Increase this value if you see gaps in responses due to time synchronization issues between VictoriaMetrics and data sources (default 5m0s) + -search.disableCache + Whether to disable response caching. This may be useful during data backfilling + -search.latencyOffset duration + The time when data points become visible in query results after the collection. Too small value can result in incomplete last points for query results (default 30s) + -search.logSlowQueryDuration duration + Log queries with execution time exceeding this value. Zero disables slow query logging (default 5s) + -search.maxConcurrentRequests int + The maximum number of concurrent search requests.
It shouldn't be high, since a single request can saturate all the CPU cores. See also -search.maxQueueDuration (default 8) + -search.maxExportDuration duration + The maximum duration for /api/v1/export call (default 720h0m0s) + -search.maxLookback duration + Synonym for -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons + -search.maxPointsPerTimeseries int + The maximum number of points per single time series returned from /api/v1/query_range. This option doesn't limit the number of scanned raw samples in the database. The main purpose of this option is to limit the number of per-series points returned to graphing UI such as Grafana. There is no sense in setting this limit to values significantly exceeding horizontal resolution of the graph (default 30000) + -search.maxQueryDuration duration + The maximum duration for query execution (default 30s) + -search.maxQueryLen size + The maximum search query length in bytes + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16384) + -search.maxQueueDuration duration + The maximum time the request waits for execution when -search.maxConcurrentRequests limit is reached; see also -search.maxQueryDuration (default 10s) + -search.maxStalenessInterval duration + The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.maxLookback' flag, which has the same meaning due to historical reasons + -search.maxStepForPointsAdjustment duration + The maximum step when /api/v1/query_range handler adjusts points with timestamps closer than -search.latencyOffset to the current time. The adjustment is needed because such points may contain incomplete data (default 1m0s) + -search.maxTagKeys int + The maximum number of tag keys returned from /api/v1/labels (default 100000) + -search.maxTagValueSuffixesPerSearch int + The maximum number of tag value suffixes returned from /metrics/find (default 100000) + -search.maxTagValues int + The maximum number of tag values returned from /api/v1/label//values (default 100000) + -search.maxUniqueTimeseries int + The maximum number of unique time series each search can scan (default 300000) + -search.minStalenessInterval duration + The minimum interval for staleness calculations. This flag could be useful for removing gaps on graphs generated from time series with irregular intervals between samples. See also '-search.maxStalenessInterval' + -search.queryStats.lastQueriesCount int + Query stats for /api/v1/status/top_queries is tracked on this number of last queries. Zero value disables query stats tracking (default 20000) + -search.queryStats.minQueryDuration duration + The minimum duration for queries to track in query stats at /api/v1/status/top_queries. Queries with lower duration are ignored in query stats + -search.resetCacheAuthKey string + Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call + -search.treatDotsAsIsInRegexps + Whether to treat dots as is in regexp label filters used in queries.
For example, foo{bar=~"a.b.c"} will be automatically converted to foo{bar=~"a\\.b\\.c"}, i.e. all the dots in regexp filters will be automatically escaped in order to match only dot char instead of matching any char. Dots in ".+", ".*" and ".{n}" regexps aren't escaped. This option is DEPRECATED in favor of {__graphite__="a.*.c"} syntax for selecting metrics matching the given Graphite metrics filter + -selfScrapeInstance string + Value for 'instance' label, which is added to self-scraped metrics (default "self") + -selfScrapeInterval duration + Interval for self-scraping own metrics at /metrics page + -selfScrapeJob string + Value for 'job' label, which is added to self-scraped metrics (default "victoria-metrics") + -smallMergeConcurrency int + The maximum number of CPU cores to use for small merges. Default value is used if set to 0 + -snapshotAuthKey string + authKey, which must be passed in query string to /snapshot* pages + -storageDataPath string + Path to storage data (default "victoria-metrics-data") + -tls + Whether to enable TLS (aka HTTPS) for incoming requests. -tlsCertFile and -tlsKeyFile must be set if -tls is set + -tlsCertFile string + Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs, since RSA certs are slow + -tlsKeyFile string + Path to file with TLS key. Used only if -tls is set + -version + Show VictoriaMetrics version +```
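Editorial note: the -promscrape.cluster.memberNum and -promscrape.cluster.membersCount flags in the list above shard scrape targets so that each member scrapes roughly 1/N of them. The sketch below only illustrates that contract; the fnv hash and the shouldScrape helper are assumptions made for this example, not vmagent's actual sharding code:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// shouldScrape illustrates the memberNum/membersCount contract:
// every target hashes to exactly one member, so N members cover the
// whole target set without overlap.
func shouldScrape(target string, memberNum, membersCount int) bool {
	h := fnv.New64a()
	h.Write([]byte(target))
	return int(h.Sum64()%uint64(membersCount)) == memberNum
}

func main() {
	targets := []string{"node1:9100", "node2:9100", "node3:9100", "node4:9100"}
	for member := 0; member < 2; member++ {
		for _, target := range targets {
			if shouldScrape(target, member, 2) {
				fmt.Printf("member %d scrapes %s\n", member, target)
			}
		}
	}
}
```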
diff --git a/docs/vmagent.md index 86fd70dd6..f1625ada8 100644 --- a/docs/vmagent.md +++ b/docs/vmagent.md @@ -512,16 +512,16 @@ See the docs at https://victoriametrics.github.io/vmagent.html . Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password -httpListenAddr string TCP address to listen for http connections. Set this flag to empty value in order to disable listening on any port. This mode may be useful for running multiple vmagent instances on the same server. Note that /targets and /metrics pages aren't available if -httpListenAddr='' (default ":8429") - -import.maxLineLen max_rows_per_line - The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with max_rows_per_line query arg passed to /api/v1/export - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 104857600) + -import.maxLineLen size + The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with 'max_rows_per_line' query arg passed to /api/v1/export + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 104857600) -influx.databaseNames array Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb Supports array of values separated by comma or specified via multiple flags. - -influx.maxLineSize value + -influx.maxLineSize size The maximum size in bytes for a single Influx line during parsing - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 262144) - -influxListenAddr http://:8429/write + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144) + -influxListenAddr string TCP and UDP address to listen for Influx line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://:8429/write -influxMeasurementFieldSeparator string Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol (default "_") @@ -549,12 +549,12 @@ See the docs at https://victoriametrics.github.io/vmagent.html . Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit -maxConcurrentInserts int The maximum number of concurrent inserts. Default value should work for most cases, since it minimizes the overhead for concurrent inserts. This option is tigthly coupled with -insert.maxQueueDuration (default 16) - -maxInsertRequestSize value + -maxInsertRequestSize size The maximum size in bytes of a single Prometheus remote_write API request - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 33554432) - -memory.allowedBytes value + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432) + -memory.allowedBytes size Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -memory.allowedPercent float Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) -metricsAuthKey string @@ -565,9 +565,9 @@ See the docs at https://victoriametrics.github.io/vmagent.html . TCP and UDP address to listen for OpentTSDB metrics. Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. Usually :4242 must be set. Doesn't work if empty -opentsdbTrimTimestamp duration Trim timestamps for OpenTSDB 'telnet put' data to this duration. Minimum practical duration is 1s. Higher duration (i.e. 1m) may be used for reducing disk space usage for timestamp data (default 1s) - -opentsdbhttp.maxInsertRequestSize value + -opentsdbhttp.maxInsertRequestSize size The maximum size of OpenTSDB HTTP put request - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 33554432) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 33554432) -opentsdbhttpTrimTimestamp duration Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms) -pprofAuthKey string @@ -586,45 +586,45 @@ See the docs at https://victoriametrics.github.io/vmagent.html . Whether to allow only supported fields in -promscrape.config . By default unsupported fields are silently skipped -promscrape.configCheckInterval duration Interval for checking for changes in '-promscrape.config' file. By default the checking is disabled.
Send SIGHUP signal in order to force config check for changes - -promscrape.consulSDCheckInterval consul_sd_configs + -promscrape.consulSDCheckInterval duration Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s) -promscrape.disableCompression Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control - -promscrape.disableKeepAlive disable_keepalive: true - Whether to disable HTTP keep-alive connections when scraping all the targets. This may be useful when targets has no support for HTTP keep-alive connection. It is possible to set disable_keepalive: true individually per each 'scrape_config` section in '-promscrape.config' for fine grained control. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets + -promscrape.disableKeepAlive + Whether to disable HTTP keep-alive connections when scraping all the targets. This may be useful when targets have no support for HTTP keep-alive connections. It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control. Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets -promscrape.discovery.concurrency int The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100) -promscrape.discovery.concurrentWaitTime duration The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s) - -promscrape.dnsSDCheckInterval dns_sd_configs + -promscrape.dnsSDCheckInterval duration Interval for checking for changes in dns. This works only if dns_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config for details (default 30s) - -promscrape.dockerswarmSDCheckInterval dockerswarm_sd_configs + -promscrape.dockerswarmSDCheckInterval duration Interval for checking for changes in dockerswarm. This works only if dockerswarm_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config for details (default 30s) -promscrape.dropOriginalLabels Whether to drop original labels for scrape targets at /targets and /api/v1/targets pages. This may be needed for reducing memory usage when original labels for big number of scrape targets occupy big amounts of memory. Note that this reduces debuggability for improper per-target relabeling configs - -promscrape.ec2SDCheckInterval ec2_sd_configs + -promscrape.ec2SDCheckInterval duration Interval for checking for changes in ec2. This works only if ec2_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config for details (default 1m0s) - -promscrape.eurekaSDCheckInterval eureka_sd_configs + -promscrape.eurekaSDCheckInterval duration Interval for checking for changes in eureka.
This works only if eureka_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config for details (default 30s) -promscrape.fileSDCheckInterval duration Interval for checking for changes in 'file_sd_config'. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config for details (default 30s) - -promscrape.gceSDCheckInterval gce_sd_configs + -promscrape.gceSDCheckInterval duration Interval for checking for changes in gce. This works only if gce_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config for details (default 1m0s) -promscrape.kubernetes.apiServerTimeout duration How frequently to reload the full state from Kubernetes API server (default 30m0s) - -promscrape.kubernetesSDCheckInterval kubernetes_sd_configs + -promscrape.kubernetesSDCheckInterval duration Interval for checking for changes in Kubernetes API server. This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details (default 30s) - -promscrape.maxDroppedTargets droppedTargets + -promscrape.maxDroppedTargets int The maximum number of droppedTargets shown at /api/v1/targets page. Increase this value if your setup drops more scrape targets during relabeling and you need to investigate labels for all the dropped targets. Note that the increased number of tracked dropped targets may result in increased memory usage (default 1000) - -promscrape.maxScrapeSize value + -promscrape.maxScrapeSize size The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 16777216) - -promscrape.openstackSDCheckInterval openstack_sd_configs + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216) + -promscrape.openstackSDCheckInterval duration Interval for checking for changes in openstack API server. This works only if openstack_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config for details (default 30s) - -promscrape.streamParse stream_parse: true - Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target. It is posible to set stream_parse: true individually per each `scrape_config` section in `-promscrape.config` for fine grained control - -promscrape.suppressDuplicateScrapeTargetErrors duplicate scrape target - Whether to suppress duplicate scrape target errors; see https://victoriametrics.github.io/vmagent.html#troubleshooting for details + -promscrape.streamParse + Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful for reducing memory usage when millions of metrics are exposed per each scrape target.
It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control + -promscrape.suppressDuplicateScrapeTargetErrors + Whether to suppress 'duplicate scrape target' errors; see https://victoriametrics.github.io/vmagent.html#troubleshooting for details -promscrape.suppressScrapeErrors Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed -remoteWrite.basicAuth.password array @@ -641,12 +641,12 @@ See the docs at https://victoriametrics.github.io/vmagent.html . -remoteWrite.label array Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage Supports array of values separated by comma or specified via multiple flags. - -remoteWrite.maxBlockSize value + -remoteWrite.maxBlockSize size The maximum size in bytes of unpacked request to send to remote storage. It shouldn't exceed -maxInsertRequestSize from VictoriaMetrics - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 8388608) - -remoteWrite.maxDiskUsagePerURL value + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 8388608) + -remoteWrite.maxDiskUsagePerURL size The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500000000. Disk usage is unlimited if the value is set to 0 - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -remoteWrite.proxyURL array Optional proxy URL for writing data to -remoteWrite.url. Supported proxies: http, https, socks5. Example: -remoteWrite.proxyURL=socks5://proxy:1234 Supports array of values separated by comma or specified via multiple flags. diff --git a/docs/vmalert.md b/docs/vmalert.md index 5a286c289..691ab86a0 100644 --- a/docs/vmalert.md +++ b/docs/vmalert.md @@ -272,9 +272,9 @@ The shortlist of configuration flags is the following: Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") -loggerWarnsPerSecondLimit int Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit - -memory.allowedBytes value + -memory.allowedBytes size Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -memory.allowedPercent float Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes.
Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) -metricsAuthKey string diff --git a/docs/vmauth.md b/docs/vmauth.md index 133718787..4baa2b9e8 100644 --- a/docs/vmauth.md +++ b/docs/vmauth.md @@ -208,9 +208,9 @@ See the docs at https://victoriametrics.github.io/vmauth.html . Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") -loggerWarnsPerSecondLimit int Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit - -memory.allowedBytes value + -memory.allowedBytes size Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -memory.allowedPercent float Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) -metricsAuthKey string diff --git a/docs/vmbackup.md b/docs/vmbackup.md index a26357b89..d471847a7 100644 --- a/docs/vmbackup.md +++ b/docs/vmbackup.md @@ -205,12 +205,12 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time- Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") -loggerWarnsPerSecondLimit int Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit - -maxBytesPerSecond value + -maxBytesPerSecond size The maximum upload speed. There is no limit if it is set to 0 - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) - -memory.allowedBytes value + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) + -memory.allowedBytes size Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -memory.allowedPercent float Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. 
Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) -origin string diff --git a/docs/vmrestore.md b/docs/vmrestore.md index 67127d5be..3fa55905d 100644 --- a/docs/vmrestore.md +++ b/docs/vmrestore.md @@ -105,12 +105,12 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC") -loggerWarnsPerSecondLimit int Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit - -maxBytesPerSecond value + -maxBytesPerSecond size The maximum download speed. There is no limit if it is set to 0 - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) - -memory.allowedBytes value + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) + -memory.allowedBytes size Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to non-zero value. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage - Supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB (default 0) + Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0) -memory.allowedPercent float Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low value may increase cache miss rate, which usually results in higher CPU and disk IO usage. Too high value may evict too much data from OS page cache, which will result in higher disk IO usage (default 60) -skipBackupCompleteCheck diff --git a/lib/flagutil/bytes.go b/lib/flagutil/bytes.go index 08f012fc0..de15229a3 100644 --- a/lib/flagutil/bytes.go +++ b/lib/flagutil/bytes.go @@ -9,7 +9,7 @@ import ( // NewBytes returns new `bytes` flag with the given name, defaultValue and description. func NewBytes(name string, defaultValue int, description string) *Bytes { - description += "\nSupports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB" + description += "\nSupports the following optional suffixes for `size` values: KB, MB, GB, KiB, MiB, GiB" b := Bytes{ N: defaultValue, valueString: fmt.Sprintf("%d", defaultValue), diff --git a/lib/promscrape/client.go b/lib/promscrape/client.go index f6a264802..0f2ddd202 100644 --- a/lib/promscrape/client.go +++ b/lib/promscrape/client.go @@ -27,11 +27,11 @@ var ( "It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control") disableKeepAlive = flag.Bool("promscrape.disableKeepAlive", false, "Whether to disable HTTP keep-alive connections when scraping all the targets. "+ "This may be useful when targets have no support for HTTP keep-alive connection. "+ - "It is possible to set `disable_keepalive: true` individually per each 'scrape_config` section in '-promscrape.config' for fine grained control. "+ + "It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control. 
"+ "Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets") streamParse = flag.Bool("promscrape.streamParse", false, "Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful "+ "for reducing memory usage when millions of metrics are exposed per each scrape target. "+ - "It is posible to set `stream_parse: true` individually per each `scrape_config` section in `-promscrape.config` for fine grained control") + "It is posible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine grained control") ) type client struct { diff --git a/lib/promscrape/discovery/consul/watch.go b/lib/promscrape/discovery/consul/watch.go index 06acaedf5..bafb3f183 100644 --- a/lib/promscrape/discovery/consul/watch.go +++ b/lib/promscrape/discovery/consul/watch.go @@ -15,7 +15,7 @@ import ( // SDCheckInterval is check interval for Consul service discovery. var SDCheckInterval = flag.Duration("promscrape.consulSDCheckInterval", 30*time.Second, "Interval for checking for changes in Consul. "+ - "This works only if `consul_sd_configs` is configured in '-promscrape.config' file. "+ + "This works only if consul_sd_configs is configured in '-promscrape.config' file. "+ "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details") // consulWatcher is a watcher for consul api, updates services map in background with long-polling. diff --git a/lib/promscrape/scraper.go b/lib/promscrape/scraper.go index c0b68edc2..d02f1732f 100644 --- a/lib/promscrape/scraper.go +++ b/lib/promscrape/scraper.go @@ -21,29 +21,29 @@ var ( fileSDCheckInterval = flag.Duration("promscrape.fileSDCheckInterval", 30*time.Second, "Interval for checking for changes in 'file_sd_config'. "+ "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config for details") kubernetesSDCheckInterval = flag.Duration("promscrape.kubernetesSDCheckInterval", 30*time.Second, "Interval for checking for changes in Kubernetes API server. "+ - "This works only if `kubernetes_sd_configs` is configured in '-promscrape.config' file. "+ + "This works only if kubernetes_sd_configs is configured in '-promscrape.config' file. "+ "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config for details") openstackSDCheckInterval = flag.Duration("promscrape.openstackSDCheckInterval", 30*time.Second, "Interval for checking for changes in openstack API server. "+ - "This works only if `openstack_sd_configs` is configured in '-promscrape.config' file. "+ + "This works only if openstack_sd_configs is configured in '-promscrape.config' file. "+ "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config for details") eurekaSDCheckInterval = flag.Duration("promscrape.eurekaSDCheckInterval", 30*time.Second, "Interval for checking for changes in eureka. "+ - "This works only if `eureka_sd_configs` is configured in '-promscrape.config' file. "+ + "This works only if eureka_sd_configs is configured in '-promscrape.config' file. "+ "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config for details") dnsSDCheckInterval = flag.Duration("promscrape.dnsSDCheckInterval", 30*time.Second, "Interval for checking for changes in dns. "+ - "This works only if `dns_sd_configs` is configured in '-promscrape.config' file. 
"+ + "This works only if dns_sd_configs is configured in '-promscrape.config' file. "+ "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config for details") ec2SDCheckInterval = flag.Duration("promscrape.ec2SDCheckInterval", time.Minute, "Interval for checking for changes in ec2. "+ - "This works only if `ec2_sd_configs` is configured in '-promscrape.config' file. "+ + "This works only if ec2_sd_configs is configured in '-promscrape.config' file. "+ "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config for details") gceSDCheckInterval = flag.Duration("promscrape.gceSDCheckInterval", time.Minute, "Interval for checking for changes in gce. "+ - "This works only if `gce_sd_configs` is configured in '-promscrape.config' file. "+ + "This works only if gce_sd_configs is configured in '-promscrape.config' file. "+ "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config for details") dockerswarmSDCheckInterval = flag.Duration("promscrape.dockerswarmSDCheckInterval", 30*time.Second, "Interval for checking for changes in dockerswarm. "+ - "This works only if `dockerswarm_sd_configs` is configured in '-promscrape.config' file. "+ + "This works only if dockerswarm_sd_configs is configured in '-promscrape.config' file. "+ "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config for details") promscrapeConfigFile = flag.String("promscrape.config", "", "Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. "+ "See https://victoriametrics.github.io/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details") - suppressDuplicateScrapeTargetErrors = flag.Bool("promscrape.suppressDuplicateScrapeTargetErrors", false, "Whether to suppress `duplicate scrape target` errors; "+ + suppressDuplicateScrapeTargetErrors = flag.Bool("promscrape.suppressDuplicateScrapeTargetErrors", false, "Whether to suppress 'duplicate scrape target' errors; "+ "see https://victoriametrics.github.io/vmagent.html#troubleshooting for details") ) diff --git a/lib/promscrape/targetstatus.go b/lib/promscrape/targetstatus.go index 47b26f4df..21998f47b 100644 --- a/lib/promscrape/targetstatus.go +++ b/lib/promscrape/targetstatus.go @@ -18,7 +18,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel" ) -var maxDroppedTargets = flag.Int("promscrape.maxDroppedTargets", 1000, "The maximum number of `droppedTargets` shown at /api/v1/targets page. "+ +var maxDroppedTargets = flag.Int("promscrape.maxDroppedTargets", 1000, "The maximum number of droppedTargets to show at /api/v1/targets page. "+ "Increase this value if your setup drops more scrape targets during relabeling and you need investigating labels for all the dropped targets. 
"+ "Note that the increased number of tracked dropped targets may result in increased memory usage") diff --git a/lib/protoparser/vmimport/streamparser.go b/lib/protoparser/vmimport/streamparser.go index 183b316e5..7646d6cee 100644 --- a/lib/protoparser/vmimport/streamparser.go +++ b/lib/protoparser/vmimport/streamparser.go @@ -15,7 +15,7 @@ import ( ) var maxLineLen = flagutil.NewBytes("import.maxLineLen", 100*1024*1024, "The maximum length in bytes of a single line accepted by /api/v1/import; "+ - "the line length can be limited with `max_rows_per_line` query arg passed to /api/v1/export") + "the line length can be limited with 'max_rows_per_line' query arg passed to /api/v1/export") // ParseStream parses /api/v1/import lines from req and calls callback for the parsed rows. // From acf2a82d3c5ec0938b89b86fbae83df0f86c48cc Mon Sep 17 00:00:00 2001 From: f41gh7 Date: Mon, 15 Mar 2021 23:03:52 +0300 Subject: [PATCH 57/59] typo fix --- lib/influxutils/influxutils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/influxutils/influxutils.go b/lib/influxutils/influxutils.go index 5a648b9e5..01af90864 100644 --- a/lib/influxutils/influxutils.go +++ b/lib/influxutils/influxutils.go @@ -23,7 +23,7 @@ func WriteDatabaseNames(w http.ResponseWriter) { } dbs := make([]string, len(dbNames)) for i := range dbNames { - dbs[i] = fmt.Sprintf(`"[%s]"`, dbNames[i]) + dbs[i] = fmt.Sprintf(`[%q]`, dbNames[i]) } fmt.Fprintf(w, `{"results":[{"name":"databases","columns":["name"],"series":[{"values":[%s]}]}]}`, strings.Join(dbs, ",")) } From 37323c57c9520b1041ee16587a19a2efc5b71546 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 22:18:15 +0200 Subject: [PATCH 58/59] lib/influxutils: return response compatible with InfluxDB 1.8.4 Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1124 --- lib/influxutils/influxutils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/influxutils/influxutils.go b/lib/influxutils/influxutils.go index 01af90864..f441ddd49 100644 --- a/lib/influxutils/influxutils.go +++ b/lib/influxutils/influxutils.go @@ -25,5 +25,5 @@ func WriteDatabaseNames(w http.ResponseWriter) { for i := range dbNames { dbs[i] = fmt.Sprintf(`[%q]`, dbNames[i]) } - fmt.Fprintf(w, `{"results":[{"name":"databases","columns":["name"],"series":[{"values":[%s]}]}]}`, strings.Join(dbs, ",")) + fmt.Fprintf(w, `{"results":[{"statement_id":0,"series":[{"name":"databases","columns":["name"],"values":[%s]}]}]}`, strings.Join(dbs, ",")) } From dd7e82c34fc01c17f96e51ff5f9f5839229c52ea Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 15 Mar 2021 22:38:50 +0200 Subject: [PATCH 59/59] app/vmstorage: add `-logNewSeries` command-line flag for determining the source of series churn rate --- README.md | 2 ++ app/vmstorage/main.go | 3 +++ docs/CHANGELOG.md | 1 + docs/Single-server-VictoriaMetrics.md | 2 ++ lib/storage/index_db.go | 12 ++++++++++++ 5 files changed, 20 insertions(+) diff --git a/README.md b/README.md index c68e8d76d..b602c58fc 100644 --- a/README.md +++ b/README.md @@ -1375,6 +1375,8 @@ See the example of alerting rules for VM components [here](https://github.com/Vi VictoriaMetrics accepts optional `date=YYYY-MM-DD` and `topN=42` args on this page. By default `date` equals to the current date, while `topN` equals to 10. +* New time series can be logged if `-logNewSeries` command-line flag is passed to VictoriaMetrics. 
+ * VictoriaMetrics limits the number of labels per each metric with `-maxLabelsPerTimeseries` command-line flag. This prevents ingesting metrics with too many labels. It is recommended to [monitor](#monitoring) the `vm_metrics_with_dropped_labels_total` metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload. diff --git a/app/vmstorage/main.go b/app/vmstorage/main.go index 4a96fd130..731fc0002 100644 --- a/app/vmstorage/main.go +++ b/app/vmstorage/main.go @@ -36,6 +36,8 @@ var ( bigMergeConcurrency = flag.Int("bigMergeConcurrency", 0, "The maximum number of CPU cores to use for big merges. Default value is used if set to 0") smallMergeConcurrency = flag.Int("smallMergeConcurrency", 0, "The maximum number of CPU cores to use for small merges. Default value is used if set to 0") + logNewSeries = flag.Bool("logNewSeries", false, "Whether to log new series. This option is for debug purposes only. It can lead to performance issues "+ "when a big number of new series is ingested into VictoriaMetrics") denyQueriesOutsideRetention = flag.Bool("denyQueriesOutsideRetention", false, "Whether to deny queries outside of the configured -retentionPeriod. "+ "When set, then /api/v1/query_range would return '503 Service Unavailable' error for queries with 'from' value outside -retentionPeriod. "+ "This may be useful when multiple data sources with distinct retentions are hidden behind query-tee") @@ -71,6 +73,7 @@ func InitWithoutMetrics(resetCacheIfNeeded func(mrs []storage.MetricRow)) { } resetResponseCacheIfNeeded = resetCacheIfNeeded + storage.SetLogNewSeries(*logNewSeries) storage.SetFinalMergeDelay(*finalMergeDelay) storage.SetBigMergeWorkersCount(*bigMergeConcurrency) storage.SetSmallMergeWorkersCount(*smallMergeConcurrency) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 26411e4e8..dbb7f85ac 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -14,6 +14,7 @@ * FEATURE: vmauth: allow using regexp paths in `url_map`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1112) for details. * FEATURE: accept `round_digits` query arg at `/api/v1/query` and `/api/v1/query_range` handlers. This option can be set at Prometheus datasource in Grafana for limiting the number of digits after the decimal point in response values. * FEATURE: add `-influx.databaseNames` command-line flag, which can be used for accepting data from some Telegraf plugins such as [fluentd plugin](https://github.com/fangli/fluent-plugin-influxdb). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1124). +* FEATURE: add `-logNewSeries` command-line flag, which can be used for debugging the source of time series churn rate. * BUGFIX: vmagent: prevent the high CPU usage bug during failing scrapes with small `scrape_timeout` (less than a few seconds). * BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in a big number of distinct scrape config jobs by sharing Kubernetes object cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113 diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index c68e8d76d..b602c58fc 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -1375,6 +1375,8 @@ See the example of alerting rules for VM components [here](https://github.com/Vi VictoriaMetrics accepts optional `date=YYYY-MM-DD` and `topN=42` args on this page.
By default `date` equals to the current date, while `topN` equals to 10. +* New time series can be logged if `-logNewSeries` command-line flag is passed to VictoriaMetrics. + + * VictoriaMetrics limits the number of labels per each metric with `-maxLabelsPerTimeseries` command-line flag. This prevents ingesting metrics with too many labels. It is recommended to [monitor](#monitoring) the `vm_metrics_with_dropped_labels_total` metric in order to determine whether `-maxLabelsPerTimeseries` must be adjusted for your workload. diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go index 4e8ee3411..5d8af6c22 100644 --- a/lib/storage/index_db.go +++ b/lib/storage/index_db.go @@ -577,9 +577,21 @@ func (db *indexDB) createTSIDByName(dst *TSID, metricName []byte) error { // on db.tb flush via invalidateTagCache flushCallback passed to OpenTable. atomic.AddUint64(&db.newTimeseriesCreated, 1) + if logNewSeries { + logger.Infof("new series created: %s", mn.String()) + } return nil } +// SetLogNewSeries updates new series logging. +// +// This function must be called before calling any storage functions. +func SetLogNewSeries(ok bool) { + logNewSeries = ok +} + +var logNewSeries = false + func (db *indexDB) generateTSID(dst *TSID, metricName []byte, mn *MetricName) error { // Search the TSID in the external storage. // This is usually the db from the previous period.
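For reference, a minimal self-contained sketch of the gating pattern introduced above. The names mirror the diff, but the standalone main, the createSeries stand-in and the sample metric name are illustrative assumptions rather than actual VictoriaMetrics code:

package main

import "log"

// logNewSeries mirrors the package-level switch added to lib/storage:
// written once at startup, read on every new-series registration.
var logNewSeries = false

// SetLogNewSeries follows the diff above; it must be called before any
// other storage functions, since the flag is read without synchronization.
func SetLogNewSeries(ok bool) {
	logNewSeries = ok
}

// createSeries stands in for createTSIDByName: logging is gated behind a
// plain boolean check, so the common path stays cheap when -logNewSeries
// is disabled.
func createSeries(metricName string) {
	// ... TSID creation would happen here ...
	if logNewSeries {
		log.Printf("new series created: %s", metricName)
	}
}

func main() {
	SetLogNewSeries(true) // e.g. wired from the -logNewSeries flag at startup
	createSeries(`http_requests_total{job="demo"}`)
}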