Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-11-21 14:44:00 +00:00)

Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Commit c83ff99e0d: 91 changed files with 8126 additions and 1864 deletions

.github/workflows/main.yml (vendored): 26 lines changed
@@ -38,30 +38,8 @@ jobs:
make test-full
make test-pure
make test-full-386
make victoria-metrics
make victoria-metrics-pure
make victoria-metrics-arm
make victoria-metrics-arm64
make vmutils
GOOS=freebsd go build -mod=vendor ./app/victoria-metrics
GOOS=freebsd go build -mod=vendor ./app/vmagent
GOOS=freebsd go build -mod=vendor ./app/vmalert
GOOS=freebsd go build -mod=vendor ./app/vmbackup
GOOS=freebsd go build -mod=vendor ./app/vmrestore
GOOS=freebsd go build -mod=vendor ./app/vmctl
GOOS=openbsd go build -mod=vendor ./app/victoria-metrics
GOOS=openbsd go build -mod=vendor ./app/vmagent
GOOS=openbsd go build -mod=vendor ./app/vmalert
GOOS=openbsd go build -mod=vendor ./app/vmbackup
GOOS=openbsd go build -mod=vendor ./app/vmrestore
GOOS=openbsd go build -mod=vendor ./app/vmctl
GOOS=darwin go build -mod=vendor ./app/victoria-metrics
GOOS=darwin go build -mod=vendor ./app/vmagent
GOOS=darwin go build -mod=vendor ./app/vmalert
GOOS=darwin go build -mod=vendor ./app/vmbackup
GOOS=darwin go build -mod=vendor ./app/vmrestore
GOOS=darwin go build -mod=vendor ./app/vmctl
CGO_ENABLED=0 GOOS=windows go build -mod=vendor ./app/vmagent
make victoria-metrics-crossbuild
make vmutils-crossbuild
- name: Publish coverage
  uses: codecov/codecov-action@v3
  with:
Makefile: 298 lines changed
@@ -13,6 +13,10 @@ GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TA

.PHONY: $(MAKECMDGOALS)

include app/*/Makefile
include deployment/*/Makefile
include snap/local/Makefile

all: \
	victoria-metrics-prod \
	vmagent-prod \

@@ -22,11 +26,6 @@ all: \
	vmrestore-prod \
	vmctl-prod

include app/*/Makefile
include deployment/*/Makefile
include snap/local/Makefile

clean:
	rm -rf bin/*
@@ -64,21 +63,77 @@ vmutils-pure: \
	vmrestore-pure \
	vmctl-pure

vmutils-arm64: \
	vmagent-arm64 \
	vmalert-arm64 \
	vmauth-arm64 \
	vmbackup-arm64 \
	vmrestore-arm64 \
	vmctl-arm64
vmutils-linux-amd64: \
	vmagent-linux-amd64 \
	vmalert-linux-amd64 \
	vmauth-linux-amd64 \
	vmbackup-linux-amd64 \
	vmrestore-linux-amd64 \
	vmctl-linux-amd64

vmutils-arm: \
	vmagent-arm \
	vmalert-arm \
	vmauth-arm \
	vmbackup-arm \
	vmrestore-arm \
	vmctl-arm
vmutils-linux-arm64: \
	vmagent-linux-arm64 \
	vmalert-linux-arm64 \
	vmauth-linux-arm64 \
	vmbackup-linux-arm64 \
	vmrestore-linux-arm64 \
	vmctl-linux-arm64

vmutils-linux-arm: \
	vmagent-linux-arm \
	vmalert-linux-arm \
	vmauth-linux-arm \
	vmbackup-linux-arm \
	vmrestore-linux-arm \
	vmctl-linux-arm

vmutils-linux-386: \
	vmagent-linux-386 \
	vmalert-linux-386 \
	vmauth-linux-386 \
	vmbackup-linux-386 \
	vmrestore-linux-386 \
	vmctl-linux-386

vmutils-linux-ppc64le: \
	vmagent-linux-ppc64le \
	vmalert-linux-ppc64le \
	vmauth-linux-ppc64le \
	vmbackup-linux-ppc64le \
	vmrestore-linux-ppc64le \
	vmctl-linux-ppc64le

vmutils-darwin-amd64: \
	vmagent-darwin-amd64 \
	vmalert-darwin-amd64 \
	vmauth-darwin-amd64 \
	vmbackup-darwin-amd64 \
	vmrestore-darwin-amd64 \
	vmctl-darwin-amd64

vmutils-darwin-arm64: \
	vmagent-darwin-arm64 \
	vmalert-darwin-arm64 \
	vmauth-darwin-arm64 \
	vmbackup-darwin-arm64 \
	vmrestore-darwin-arm64 \
	vmctl-darwin-arm64

vmutils-freebsd-amd64: \
	vmagent-freebsd-amd64 \
	vmalert-freebsd-amd64 \
	vmauth-freebsd-amd64 \
	vmbackup-freebsd-amd64 \
	vmrestore-freebsd-amd64 \
	vmctl-freebsd-amd64

vmutils-openbsd-amd64: \
	vmagent-openbsd-amd64 \
	vmalert-openbsd-amd64 \
	vmauth-openbsd-amd64 \
	vmbackup-openbsd-amd64 \
	vmrestore-openbsd-amd64 \
	vmctl-openbsd-amd64

vmutils-windows-amd64: \
	vmagent-windows-amd64 \

@@ -86,6 +141,28 @@ vmutils-windows-amd64: \
	vmauth-windows-amd64 \
	vmctl-windows-amd64
victoria-metrics-crossbuild: \
	victoria-metrics-linux-amd64 \
	victoria-metrics-linux-arm64 \
	victoria-metrics-linux-arm \
	victoria-metrics-linux-386 \
	victoria-metrics-linux-ppc64le \
	victoria-metrics-darwin-amd64 \
	victoria-metrics-darwin-arm64 \
	victoria-metrics-freebsd-amd64 \
	victoria-metrics-openbsd-amd64

vmutils-crossbuild: \
	vmutils-linux-amd64 \
	vmutils-linux-arm64 \
	vmutils-linux-arm \
	vmutils-linux-386 \
	vmutils-linux-ppc64le \
	vmutils-darwin-amd64 \
	vmutils-darwin-arm64 \
	vmutils-freebsd-amd64 \
	vmutils-openbsd-amd64 \
	vmutils-windows-amd64
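The aggregate targets above allow building either a single platform-specific binary or the whole matrix in one command. A minimal usage sketch (binary names under `bin/` follow the `$(APP_NAME)-$(GOOS)-$(GOARCH)` pattern defined later in this Makefile):

```console
# build a single platform-specific binary
make victoria-metrics-linux-arm64

# build all vmutils binaries for every supported GOOS/GOARCH pair
make vmutils-crossbuild

# results land in ./bin, e.g. bin/victoria-metrics-linux-arm64
```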
publish-release:
	git checkout $(TAG) && $(MAKE) release publish && \
@@ -98,86 +175,110 @@ release: \
	release-vmutils

release-victoria-metrics: \
	release-victoria-metrics-amd64 \
	release-victoria-metrics-arm \
	release-victoria-metrics-arm64 \
	release-victoria-metrics-linux-amd64 \
	release-victoria-metrics-linux-arm \
	release-victoria-metrics-linux-arm64 \
	release-victoria-metrics-darwin-amd64 \
	release-victoria-metrics-darwin-arm64
	release-victoria-metrics-darwin-arm64 \
	release-victoria-metrics-freebsd-amd64 \
	release-victoria-metrics-openbsd-amd64

release-victoria-metrics-amd64:
	OSARCH=amd64 $(MAKE) release-victoria-metrics-generic
release-victoria-metrics-linux-amd64:
	GOOS=linux GOARCH=amd64 $(MAKE) release-victoria-metrics-goos-goarch

release-victoria-metrics-arm:
	OSARCH=arm $(MAKE) release-victoria-metrics-generic
release-victoria-metrics-linux-arm:
	GOOS=linux GOARCH=arm $(MAKE) release-victoria-metrics-goos-goarch

release-victoria-metrics-arm64:
	OSARCH=arm64 $(MAKE) release-victoria-metrics-generic
release-victoria-metrics-linux-arm64:
	GOOS=linux GOARCH=arm64 $(MAKE) release-victoria-metrics-goos-goarch

release-victoria-metrics-darwin-amd64:
	OSARCH=darwin-amd64 $(MAKE) release-victoria-metrics-generic
	GOOS=darwin GOARCH=amd64 $(MAKE) release-victoria-metrics-goos-goarch

release-victoria-metrics-darwin-arm64:
	OSARCH=darwin-arm64 $(MAKE) release-victoria-metrics-generic
	GOOS=darwin GOARCH=arm64 $(MAKE) release-victoria-metrics-goos-goarch

release-victoria-metrics-generic: victoria-metrics-$(OSARCH)-prod
release-victoria-metrics-freebsd-amd64:
	GOOS=freebsd GOARCH=amd64 $(MAKE) release-victoria-metrics-goos-goarch

release-victoria-metrics-openbsd-amd64:
	GOOS=openbsd GOARCH=amd64 $(MAKE) release-victoria-metrics-goos-goarch

release-victoria-metrics-goos-goarch: victoria-metrics-$(GOOS)-$(GOARCH)-prod
	cd bin && \
	tar --transform="flags=r;s|-$(OSARCH)||" -czf victoria-metrics-$(OSARCH)-$(PKG_TAG).tar.gz \
	victoria-metrics-$(OSARCH)-prod \
	&& sha256sum victoria-metrics-$(OSARCH)-$(PKG_TAG).tar.gz \
	victoria-metrics-$(OSARCH)-prod \
	| sed s/-$(OSARCH)-prod/-prod/ > victoria-metrics-$(OSARCH)-$(PKG_TAG)_checksums.txt
	tar --transform="flags=r;s|-$(GOOS)-$(GOARCH)||" -czf victoria-metrics-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
	victoria-metrics-$(GOOS)-$(GOARCH)-prod \
	&& sha256sum victoria-metrics-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
	victoria-metrics-$(GOOS)-$(GOARCH)-prod \
	| sed s/-$(GOOS)-$(GOARCH)-prod/-prod/ > victoria-metrics-$(GOOS)-$(GOARCH)-$(PKG_TAG)_checksums.txt
	cd bin && rm -rf victoria-metrics-$(GOOS)-$(GOARCH)-prod
release-vmutils: \
	release-vmutils-amd64 \
	release-vmutils-arm64 \
	release-vmutils-arm \
	release-vmutils-darwin-amd64 \
	release-vmutils-linux-amd64 \
	release-vmutils-linux-arm64 \
	release-vmutils-linux-arm \
	release-vmutils-darwin-amd64 \
	release-vmutils-darwin-arm64 \
	release-vmutils-freebsd-amd64 \
	release-vmutils-openbsd-amd64 \
	release-vmutils-windows-amd64

release-vmutils-amd64:
	OSARCH=amd64 $(MAKE) release-vmutils-generic
release-vmutils-linux-amd64:
	GOOS=linux GOARCH=amd64 $(MAKE) release-vmutils-goos-goarch

release-vmutils-arm64:
	OSARCH=arm64 $(MAKE) release-vmutils-generic
release-vmutils-linux-arm64:
	GOOS=linux GOARCH=arm64 $(MAKE) release-vmutils-goos-goarch

release-vmutils-arm:
	OSARCH=arm $(MAKE) release-vmutils-generic
release-vmutils-linux-arm:
	GOOS=linux GOARCH=arm $(MAKE) release-vmutils-goos-goarch

release-vmutils-darwin-amd64:
	OSARCH=darwin-amd64 $(MAKE) release-vmutils-generic
	GOOS=darwin GOARCH=amd64 $(MAKE) release-vmutils-goos-goarch

release-vmutils-darwin-arm64:
	OSARCH=darwin-arm64 $(MAKE) release-vmutils-generic
	GOOS=darwin GOARCH=arm64 $(MAKE) release-vmutils-goos-goarch

release-vmutils-freebsd-amd64:
	GOOS=freebsd GOARCH=amd64 $(MAKE) release-vmutils-goos-goarch

release-vmutils-openbsd-amd64:
	GOOS=openbsd GOARCH=amd64 $(MAKE) release-vmutils-goos-goarch

release-vmutils-windows-amd64:
	GOARCH=amd64 $(MAKE) release-vmutils-windows-generic
	GOARCH=amd64 $(MAKE) release-vmutils-windows-goarch
release-vmutils-generic: \
	vmagent-$(OSARCH)-prod \
	vmalert-$(OSARCH)-prod \
	vmauth-$(OSARCH)-prod \
	vmbackup-$(OSARCH)-prod \
	vmrestore-$(OSARCH)-prod \
	vmctl-$(OSARCH)-prod
release-vmutils-goos-goarch: \
	vmagent-$(GOOS)-$(GOARCH)-prod \
	vmalert-$(GOOS)-$(GOARCH)-prod \
	vmauth-$(GOOS)-$(GOARCH)-prod \
	vmbackup-$(GOOS)-$(GOARCH)-prod \
	vmrestore-$(GOOS)-$(GOARCH)-prod \
	vmctl-$(GOOS)-$(GOARCH)-prod
	cd bin && \
	tar --transform="flags=r;s|-$(OSARCH)||" -czf vmutils-$(OSARCH)-$(PKG_TAG).tar.gz \
	vmagent-$(OSARCH)-prod \
	vmalert-$(OSARCH)-prod \
	vmauth-$(OSARCH)-prod \
	vmbackup-$(OSARCH)-prod \
	vmrestore-$(OSARCH)-prod \
	vmctl-$(OSARCH)-prod \
	&& sha256sum vmutils-$(OSARCH)-$(PKG_TAG).tar.gz \
	vmagent-$(OSARCH)-prod \
	vmalert-$(OSARCH)-prod \
	vmauth-$(OSARCH)-prod \
	vmbackup-$(OSARCH)-prod \
	vmrestore-$(OSARCH)-prod \
	vmctl-$(OSARCH)-prod \
	| sed s/-$(OSARCH)-prod/-prod/ > vmutils-$(OSARCH)-$(PKG_TAG)_checksums.txt
	tar --transform="flags=r;s|-$(GOOS)-$(GOARCH)||" -czf vmutils-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
	vmagent-$(GOOS)-$(GOARCH)-prod \
	vmalert-$(GOOS)-$(GOARCH)-prod \
	vmauth-$(GOOS)-$(GOARCH)-prod \
	vmbackup-$(GOOS)-$(GOARCH)-prod \
	vmrestore-$(GOOS)-$(GOARCH)-prod \
	vmctl-$(GOOS)-$(GOARCH)-prod \
	&& sha256sum vmutils-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
	vmagent-$(GOOS)-$(GOARCH)-prod \
	vmalert-$(GOOS)-$(GOARCH)-prod \
	vmauth-$(GOOS)-$(GOARCH)-prod \
	vmbackup-$(GOOS)-$(GOARCH)-prod \
	vmrestore-$(GOOS)-$(GOARCH)-prod \
	vmctl-$(GOOS)-$(GOARCH)-prod \
	| sed s/-$(GOOS)-$(GOARCH)-prod/-prod/ > vmutils-$(GOOS)-$(GOARCH)-$(PKG_TAG)_checksums.txt
	cd bin && rm -rf \
	vmagent-$(GOOS)-$(GOARCH)-prod \
	vmalert-$(GOOS)-$(GOARCH)-prod \
	vmauth-$(GOOS)-$(GOARCH)-prod \
	vmbackup-$(GOOS)-$(GOARCH)-prod \
	vmrestore-$(GOOS)-$(GOARCH)-prod \
	vmctl-$(GOOS)-$(GOARCH)-prod

release-vmutils-windows-generic: \
release-vmutils-windows-goarch: \
	vmagent-windows-$(GOARCH)-prod \
	vmalert-windows-$(GOARCH)-prod \
	vmauth-windows-$(GOARCH)-prod \

@@ -194,18 +295,23 @@ release-vmutils-windows-generic: \
	vmauth-windows-$(GOARCH)-prod.exe \
	vmctl-windows-$(GOARCH)-prod.exe \
	> vmutils-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
	cd bin && rm -rf \
	vmagent-windows-$(GOARCH)-prod.exe \
	vmalert-windows-$(GOARCH)-prod.exe \
	vmauth-windows-$(GOARCH)-prod.exe \
	vmctl-windows-$(GOARCH)-prod.exe
pprof-cpu:
	go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics@ $(PPROF_FILE)

fmt:
	GO111MODULE=on gofmt -l -w -s ./lib
	GO111MODULE=on gofmt -l -w -s ./app
	gofmt -l -w -s ./lib
	gofmt -l -w -s ./app

vet:
	GO111MODULE=on go vet -mod=vendor ./lib/...
	GO111MODULE=on go vet -mod=vendor ./app/...
	go vet -mod=vendor ./lib/...
	go vet -mod=vendor ./app/...

lint: install-golint
	golint lib/...
@@ -232,45 +338,45 @@ install-errcheck:
check-all: fmt vet lint errcheck golangci-lint

test:
	GO111MODULE=on go test -mod=vendor ./lib/... ./app/...
	go test -mod=vendor ./lib/... ./app/...

test-race:
	GO111MODULE=on go test -mod=vendor -race ./lib/... ./app/...
	go test -mod=vendor -race ./lib/... ./app/...

test-pure:
	GO111MODULE=on CGO_ENABLED=0 go test -mod=vendor ./lib/... ./app/...
	CGO_ENABLED=0 go test -mod=vendor ./lib/... ./app/...

test-full:
	GO111MODULE=on go test -mod=vendor -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
	go test -mod=vendor -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

test-full-386:
	GO111MODULE=on GOARCH=386 go test -mod=vendor -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
	GOARCH=386 go test -mod=vendor -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

benchmark:
	GO111MODULE=on go test -mod=vendor -bench=. ./lib/...
	GO111MODULE=on go test -mod=vendor -bench=. ./app/...
	go test -mod=vendor -bench=. ./lib/...
	go test -mod=vendor -bench=. ./app/...

benchmark-pure:
	GO111MODULE=on CGO_ENABLED=0 go test -mod=vendor -bench=. ./lib/...
	GO111MODULE=on CGO_ENABLED=0 go test -mod=vendor -bench=. ./app/...
	CGO_ENABLED=0 go test -mod=vendor -bench=. ./lib/...
	CGO_ENABLED=0 go test -mod=vendor -bench=. ./app/...

vendor-update:
	GO111MODULE=on go get -u -d ./lib/...
	GO111MODULE=on go get -u -d ./app/...
	GO111MODULE=on go mod tidy -compat=1.17
	GO111MODULE=on go mod vendor
	go get -u -d ./lib/...
	go get -u -d ./app/...
	go mod tidy -compat=1.17
	go mod vendor
app-local:
	CGO_ENABLED=1 GO111MODULE=on go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
	CGO_ENABLED=1 go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)

app-local-pure:
	CGO_ENABLED=0 GO111MODULE=on go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
	CGO_ENABLED=0 go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)

app-local-with-goarch:
	GO111MODULE=on go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
app-local-goos-goarch:
	CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
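Each per-app `*-$(GOOS)-$(GOARCH)` target in this commit delegates to the new generic rule by exporting `APP_NAME`, `CGO_ENABLED`, `GOOS` and `GOARCH`. A sketch of the equivalent direct invocation, mirroring what e.g. `make vmctl-freebsd-amd64` does under the hood:

```console
APP_NAME=vmctl CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 make app-local-goos-goarch
# output: bin/vmctl-freebsd-amd64
```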
app-local-windows-with-goarch:
	CGO_ENABLED=0 GO111MODULE=on go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)
app-local-windows-goarch:
	CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)

quicktemplate-gen: install-qtc
	qtc
README.md: 33 lines changed
@@ -15,18 +15,21 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
[Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
Just download VictoriaMetrics and follow [these instructions](https://docs.victoriametrics.com/Quick-Start.html).
Just download [the latest version of VictoriaMetrics](https://docs.victoriametrics.com/CHANGELOG.html)
and follow [these instructions](https://docs.victoriametrics.com/Quick-Start.html).

The cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).

Learn more about [key concepts](https://docs.victoriametrics.com/keyConcepts.html) of VictoriaMetrics and follow the
[QuickStart guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.
[quick start guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.

[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
See [features available in enterprise package](https://victoriametrics.com/products/enterprise/).
Enterprise binaries can be downloaded and evaluated for free
from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).

VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).

## Prominent features

VictoriaMetrics has the following prominent features:
@@ -110,12 +113,21 @@ The following command-line flags are used the most:

Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).

See how to [ingest data to VictoriaMetrics](#how-to-import-time-series-data), how to [query VictoriaMetrics via Grafana](#grafana-setup), how to [query VictoriaMetrics via Graphite API](#graphite-api-usage) and how to [handle alerts](#alerting).
The following docs may be useful during initial VictoriaMetrics setup:
* [How to set up scraping of Prometheus-compatible targets](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
* [How to ingest data to VictoriaMetrics](#how-to-import-time-series-data)
* [How to set up Prometheus to write data to VictoriaMetrics](https://docs.victoriametrics.com/#prometheus-setup)
* [How to query VictoriaMetrics via Grafana](#grafana-setup)
* [How to query VictoriaMetrics via Graphite API](#graphite-api-usage)
* [How to handle alerts](#alerting)

VictoriaMetrics accepts [Prometheus querying API requests](#prometheus-querying-api-usage) on port `8428` by default.

It is recommended setting up [monitoring](#monitoring) for VictoriaMetrics.

VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).

### Environment variables

Each flag value can be set via environment variables according to these rules:
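The rules themselves are elided by this diff hunk. As a rough illustration only, assuming the `-envflag.enable` flag and an `-envflag.prefix` of `VM_` as documented elsewhere in the README, a flag such as `-retentionPeriod` could be supplied through the environment like this:

```console
# hypothetical example: enable env-var flags, then set -retentionPeriod via VM_retentionPeriod
VM_retentionPeriod=12 ./victoria-metrics-prod -envflag.enable -envflag.prefix=VM_
```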
@@ -228,6 +240,8 @@ Then build graphs and dashboards for the created datasource using [PromQL](https

## How to upgrade VictoriaMetrics

VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking [the CHANGELOG page](https://docs.victoriametrics.com/CHANGELOG.html) and performing regular upgrades.

It is safe upgrading VictoriaMetrics to new versions unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is safe skipping multiple versions during the upgrade unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is recommended performing regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features.

It is also safe downgrading to older versions unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise.
@@ -636,6 +650,7 @@ VictoriaMetrics accepts `round_digits` query arg for `/api/v1/query` and `/api/v
VictoriaMetrics accepts `limit` query arg for `/api/v1/labels` and `/api/v1/label/<labelName>/values` handlers for limiting the number of returned entries. For example, the query to `/api/v1/labels?limit=5` returns a sample of up to 5 unique labels, while ignoring the rest of labels. If the provided `limit` value exceeds the corresponding `-search.maxTagKeys` / `-search.maxTagValues` command-line flag values, then limits specified in the command-line flags are used.

By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
VictoriaMetrics accepts `limit` query arg for `/api/v1/series` handlers for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used.
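As a sketch, the new defaults can be exercised with a query like the following (the port assumes the default `-httpListenAddr` of `:8428` mentioned above; the `match[]` selector is illustrative):

```console
# last 5 minutes by default; explicit start/end widen the range, limit caps the response size
curl 'http://localhost:8428/api/v1/series?match[]=up&start=2022-06-01T00:00:00Z&end=2022-06-02T00:00:00Z&limit=5'
```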
Additionally, VictoriaMetrics provides the following handlers:
@@ -730,14 +745,14 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b

### Development ARM build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make victoria-metrics-arm` or `make victoria-metrics-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
   It builds `victoria-metrics-arm` or `victoria-metrics-arm64` binary respectively and puts it into the `bin` folder.
2. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
   It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.

### Production ARM build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make victoria-metrics-arm-prod` or `make victoria-metrics-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
   It builds `victoria-metrics-arm-prod` or `victoria-metrics-arm64-prod` binary respectively and puts it into the `bin` folder.
2. Run `make victoria-metrics-linux-arm-prod` or `make victoria-metrics-linux-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
   It builds `victoria-metrics-linux-arm-prod` or `victoria-metrics-linux-arm64-prod` binary respectively and puts it into the `bin` folder.

### Pure Go build (CGO_ENABLED=0)
@@ -1255,7 +1270,7 @@ with the enabled de-duplication. See [this section](#deduplication) for details.

VictoriaMetrics leaves a single raw sample with the biggest timestamp per each `-dedup.minScrapeInterval` discrete interval if `-dedup.minScrapeInterval` is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would leave a single raw sample with the biggest timestamp per each discrete 60s interval. If multiple raw samples have the same biggest timestamp on the given `-dedup.minScrapeInterval` discrete interval, then an arbitrary sample out of these samples is left. This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).

The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. It is safe to use deduplication and downsampling simultaneously.
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. So it is safe to use deduplication and downsampling simultaneously.

The recommended value for `-dedup.minScrapeInterval` must equal the `scrape_interval` config from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
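As a minimal sketch, with a `scrape_interval` of 30s configured on the senders, the matching deduplication setting would be passed like this (binary name per the build targets above):

```console
# keep one raw sample per 30s interval, matching the scrape_interval of the senders
./victoria-metrics-prod -dedup.minScrapeInterval=30s
```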
@@ -1263,6 +1278,8 @@ The de-duplication reduces disk space usage if multiple identically configured [
write data to the same VictoriaMetrics instance. These vmagent or Prometheus instances must have identical
`external_labels` section in their configs, so they write data to the same time series. See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).

It is recommended passing different `-promscrape.cluster.name` values to HA pairs of `vmagent` instances, so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples from other `vmagent` instances. See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.

## Storage

VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like
@@ -12,20 +12,20 @@ victoria-metrics-prod:
victoria-metrics-pure-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-pure

victoria-metrics-amd64-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-amd64
victoria-metrics-linux-amd64-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-amd64

victoria-metrics-arm-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-arm
victoria-metrics-linux-arm-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-arm

victoria-metrics-arm64-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-arm64
victoria-metrics-linux-arm64-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-arm64

victoria-metrics-ppc64le-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-ppc64le
victoria-metrics-linux-ppc64le-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-ppc64le

victoria-metrics-386-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-386
victoria-metrics-linux-386-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-386

victoria-metrics-darwin-amd64-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-darwin-amd64

@@ -33,6 +33,12 @@ victoria-metrics-darwin-amd64-prod:
victoria-metrics-darwin-arm64-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-darwin-arm64

victoria-metrics-freebsd-amd64-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-freebsd-amd64

victoria-metrics-openbsd-amd64-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-openbsd-amd64

package-victoria-metrics:
	APP_NAME=victoria-metrics $(MAKE) package-via-docker

@@ -64,60 +70,68 @@ run-victoria-metrics:
	ARGS='-graphiteListenAddr=:2003 -opentsdbListenAddr=:4242 -retentionPeriod=12 -search.maxUniqueTimeseries=1000000 -search.maxQueryDuration=10m' \
	$(MAKE) run-via-docker

victoria-metrics-amd64:
	CGO_ENABLED=1 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-amd64 ./app/victoria-metrics
victoria-metrics-linux-amd64:
	APP_NAME=victoria-metrics CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-metrics-arm:
	CGO_ENABLED=0 GOOS=linux GOARCH=arm GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-arm ./app/victoria-metrics
victoria-metrics-linux-arm:
	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch

victoria-metrics-arm64:
	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-arm64 ./app/victoria-metrics
victoria-metrics-linux-arm64:
	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch

victoria-metrics-ppc64le:
	CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-ppc64le ./app/victoria-metrics
victoria-metrics-linux-ppc64le:
	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch

victoria-metrics-386:
	CGO_ENABLED=0 GOOS=linux GOARCH=386 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-386 ./app/victoria-metrics
victoria-metrics-linux-386:
	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

victoria-metrics-darwin-amd64:
	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-metrics-darwin-arm64:
	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch

victoria-metrics-freebsd-amd64:
	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-metrics-openbsd-amd64:
	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-metrics-pure:
	APP_NAME=victoria-metrics $(MAKE) app-local-pure

### Packaging as DEB - amd64
victoria-metrics-package-deb: victoria-metrics-prod
victoria-metrics-package-deb-amd64: victoria-metrics-linux-amd64-prod
	./package/package_deb.sh amd64

### Packaging as DEB - arm64
victoria-metrics-package-deb-arm64: victoria-metrics-arm64-prod
victoria-metrics-package-deb-arm: victoria-metrics-linux-arm-prod
	./package/package_deb.sh arm

### Packaging as DEB - arm64
victoria-metrics-package-deb-arm64: victoria-metrics-linux-arm64-prod
	./package/package_deb.sh arm64

### Packaging as DEB - all
victoria-metrics-package-deb-all: \
	victoria-metrics-package-deb \
victoria-metrics-package-deb: \
	victoria-metrics-package-deb-amd64 \
	victoria-metrics-package-deb-arm \
	victoria-metrics-package-deb-arm64

### Packaging as RPM - amd64
victoria-metrics-package-rpm: victoria-metrics-prod
victoria-metrics-package-rpm-amd64: victoria-metrics-linux-amd64-prod
	./package/package_rpm.sh amd64

### Packaging as RPM - arm64
victoria-metrics-package-rpm-arm64: victoria-metrics-arm64-prod
victoria-metrics-package-rpm-arm64: victoria-metrics-linux-arm64-prod
	./package/package_rpm.sh arm64

### Packaging as RPM - all
victoria-metrics-package-rpm-all: \
	victoria-metrics-package-rpm \
victoria-metrics-package-rpm: \
	victoria-metrics-package-rpm-amd64 \
	victoria-metrics-package-rpm-arm64

### Packaging as both DEB and RPM - all
victoria-metrics-package-deb-rpm-all: \
victoria-metrics-package-deb-rpm: \
	victoria-metrics-package-deb \
	victoria-metrics-package-deb-arm64 \
	victoria-metrics-package-rpm \
	victoria-metrics-package-rpm-arm64

### Packaging as snap
victoria-metrics-package-snap:
	which snapcraft || snap install snapcraft
	which multipass || snap install multipass
	snapcraft

	victoria-metrics-package-rpm
@@ -12,20 +12,20 @@ vmagent-prod:
vmagent-pure-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-pure

vmagent-amd64-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-amd64
vmagent-linux-amd64-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-linux-amd64

vmagent-arm-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-arm
vmagent-linux-arm-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-linux-arm

vmagent-arm64-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-arm64
vmagent-linux-arm64-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-linux-arm64

vmagent-ppc64le-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-ppc64le
vmagent-linux-ppc64le-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-linux-ppc64le

vmagent-386-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-386
vmagent-linux-386-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-linux-386

vmagent-darwin-amd64-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-darwin-amd64

@@ -33,6 +33,12 @@ vmagent-darwin-amd64-prod:
vmagent-darwin-arm64-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-darwin-arm64

vmagent-freebsd-amd64-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-freebsd-amd64

vmagent-openbsd-amd64-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-openbsd-amd64

vmagent-windows-amd64-prod:
	APP_NAME=vmagent $(MAKE) app-via-docker-windows-amd64

@@ -67,26 +73,35 @@ run-vmagent:
	APP_NAME=vmagent \
	$(MAKE) run-via-docker

vmagent-amd64:
	CGO_ENABLED=1 GOARCH=amd64 $(MAKE) vmagent-local-with-goarch
vmagent-linux-amd64:
	APP_NAME=vmagent CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmagent-arm:
	CGO_ENABLED=0 GOARCH=arm $(MAKE) vmagent-local-with-goarch
vmagent-linux-arm:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch

vmagent-arm64:
	CGO_ENABLED=0 GOARCH=arm64 $(MAKE) vmagent-local-with-goarch
vmagent-linux-arm64:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch

vmagent-ppc64le:
	CGO_ENABLED=0 GOARCH=ppc64le $(MAKE) vmagent-local-with-goarch
vmagent-linux-ppc64le:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch

vmagent-386:
	CGO_ENABLED=0 GOARCH=386 $(MAKE) vmagent-local-with-goarch
vmagent-linux-386:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

vmagent-local-with-goarch:
	APP_NAME=vmagent $(MAKE) app-local-with-goarch
vmagent-darwin-amd64:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmagent-darwin-arm64:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch

vmagent-freebsd-amd64:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmagent-openbsd-amd64:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmagent-windows-amd64:
	GOARCH=amd64 APP_NAME=vmagent $(MAKE) app-local-windows-goarch

vmagent-pure:
	APP_NAME=vmagent $(MAKE) app-local-pure

vmagent-windows-amd64:
	GOARCH=amd64 APP_NAME=vmagent $(MAKE) app-local-windows-with-goarch
@@ -509,7 +509,15 @@ If each target is scraped by multiple `vmagent` instances, then data deduplicati
The `-dedup.minScrapeInterval` must be set to the `scrape_interval` configured at `-promscrape.config`.
See [these docs](https://docs.victoriametrics.com/#deduplication) for details.

If multiple `vmagent` clusters scrape the same set of targets, then each cluster must have a unique value for the `-promscrape.cluster.name` command-line flag.
## High availability

It is possible to run multiple identically configured `vmagent` instances or `vmagent` [clusters](#scraping-big-number-of-targets),
so they [scrape](#how-to-collect-metrics-in-prometheus-format) the same set of targets and push the collected data to the same set of VictoriaMetrics remote storage systems.

In this case the deduplication must be configured at VictoriaMetrics in order to de-duplicate samples received from multiple identically configured `vmagent` instances or clusters.
See [these docs](https://docs.victoriametrics.com/#deduplication) for details.

It is also recommended passing different values to `-promscrape.cluster.name` command-line flag per each `vmagent` instance or per each `vmagent` cluster in HA setup.
This is needed for proper data de-duplication. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679) for details.
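A minimal sketch of such an HA pair (the remote storage URL, scrape config path and listen addresses are placeholders):

```console
# two identically configured vmagent instances scraping the same targets;
# distinct -promscrape.cluster.name values let VictoriaMetrics deduplicate their samples
./vmagent-prod -promscrape.config=scrape.yml -remoteWrite.url=http://victoria-metrics:8428/api/v1/write \
  -promscrape.cluster.name=ha-a -httpListenAddr=:8429
./vmagent-prod -promscrape.config=scrape.yml -remoteWrite.url=http://victoria-metrics:8428/api/v1/write \
  -promscrape.cluster.name=ha-b -httpListenAddr=:8430
```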
## Scraping targets via a proxy
@@ -809,14 +817,14 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b

### Development ARM build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make vmagent-arm` or `make vmagent-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
   It builds `vmagent-arm` or `vmagent-arm64` binary respectively and puts it into the `bin` folder.
2. Run `make vmagent-linux-arm` or `make vmagent-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
   It builds `vmagent-linux-arm` or `vmagent-linux-arm64` binary respectively and puts it into the `bin` folder.

### Production ARM build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmagent-arm-prod` or `make vmagent-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
   It builds `vmagent-arm-prod` or `vmagent-arm64-prod` binary respectively and puts it into the `bin` folder.
2. Run `make vmagent-linux-arm-prod` or `make vmagent-linux-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
   It builds `vmagent-linux-arm-prod` or `vmagent-linux-arm64-prod` binary respectively and puts it into the `bin` folder.

## Profiling
@@ -59,7 +59,7 @@ var (
	awsRegion    = flagutil.NewArray("remoteWrite.aws.region", "Optional AWS region to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
	awsRoleARN   = flagutil.NewArray("remoteWrite.aws.roleARN", "Optional AWS roleARN to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
	awsAccessKey = flagutil.NewArray("remoteWrite.aws.accessKey", "Optional AWS AccessKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
	awsService   = flagutil.NewArray("remoteWrite.aws.serice", "Optional AWS Service to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set. "+
	awsService   = flagutil.NewArray("remoteWrite.aws.service", "Optional AWS Service to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set. "+
		"Defaults to \"aps\"")
	awsSecretKey = flagutil.NewArray("remoteWrite.aws.secretKey", "Optional AWS SecretKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
)
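Taken together, these flags would be used roughly as follows when writing to an AWS-managed Prometheus endpoint; the URL and region are placeholders, and the corrected `-remoteWrite.aws.service` flag defaults to "aps":

```console
./vmagent-prod -remoteWrite.url=https://aps-workspaces.us-east-2.amazonaws.com/workspaces/ws-XXXX/api/v1/remote_write \
  -remoteWrite.aws.useSigv4 -remoteWrite.aws.region=us-east-2 -remoteWrite.aws.service=aps
```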
@@ -12,20 +12,20 @@ vmalert-prod:
vmalert-pure-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-pure

vmalert-amd64-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-amd64
vmalert-linux-amd64-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-linux-amd64

vmalert-arm-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-arm
vmalert-linux-arm-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-linux-arm

vmalert-arm64-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-arm64
vmalert-linux-arm64-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-linux-arm64

vmalert-ppc64le-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-ppc64le
vmalert-linux-ppc64le-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-linux-ppc64le

vmalert-386-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-386
vmalert-linux-386-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-linux-386

vmalert-darwin-amd64-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-darwin-amd64

@@ -33,6 +33,12 @@ vmalert-darwin-amd64-prod:
vmalert-darwin-arm64-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-darwin-arm64

vmalert-freebsd-amd64-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-freebsd-amd64

vmalert-openbsd-amd64-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-openbsd-amd64

vmalert-windows-amd64-prod:
	APP_NAME=vmalert $(MAKE) app-via-docker-windows-amd64

@@ -96,26 +102,35 @@ replay-vmalert: vmalert
	-replay.timeFrom=2021-05-11T07:21:43Z \
	-replay.timeTo=2021-05-29T18:40:43Z

vmalert-amd64:
	CGO_ENABLED=1 GOARCH=amd64 $(MAKE) vmalert-local-with-goarch
vmalert-linux-amd64:
	APP_NAME=vmalert CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmalert-arm:
	CGO_ENABLED=0 GOARCH=arm $(MAKE) vmalert-local-with-goarch
vmalert-linux-arm:
	APP_NAME=vmalert CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch

vmalert-arm64:
	CGO_ENABLED=0 GOARCH=arm64 $(MAKE) vmalert-local-with-goarch
vmalert-linux-arm64:
	APP_NAME=vmalert CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch

vmalert-ppc64le:
	CGO_ENABLED=0 GOARCH=ppc64le $(MAKE) vmalert-local-with-goarch
vmalert-linux-ppc64le:
	APP_NAME=vmalert CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch

vmalert-386:
	CGO_ENABLED=0 GOARCH=386 $(MAKE) vmalert-local-with-goarch
vmalert-linux-386:
	APP_NAME=vmalert CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

vmalert-local-with-goarch:
	APP_NAME=vmalert $(MAKE) app-local-with-goarch
vmalert-darwin-amd64:
	APP_NAME=vmalert CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmalert-darwin-arm64:
	APP_NAME=vmalert CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch

vmalert-freebsd-amd64:
	APP_NAME=vmalert CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmalert-openbsd-amd64:
	APP_NAME=vmalert CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmalert-windows-amd64:
	GOARCH=amd64 APP_NAME=vmalert $(MAKE) app-local-windows-goarch

vmalert-pure:
	APP_NAME=vmalert $(MAKE) app-local-pure

vmalert-windows-amd64:
	GOARCH=amd64 APP_NAME=vmalert $(MAKE) app-local-windows-with-goarch
@@ -1108,11 +1108,11 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b

### Development ARM build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
2. Run `make vmalert-arm` or `make vmalert-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
   It builds `vmalert-arm` or `vmalert-arm64` binary respectively and puts it into the `bin` folder.
2. Run `make vmalert-linux-arm` or `make vmalert-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
   It builds `vmalert-linux-arm` or `vmalert-linux-arm64` binary respectively and puts it into the `bin` folder.

### Production ARM build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmalert-arm-prod` or `make vmalert-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
   It builds `vmalert-arm-prod` or `vmalert-arm64-prod` binary respectively and puts it into the `bin` folder.
2. Run `make vmalert-linux-arm-prod` or `make vmalert-linux-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
   It builds `vmalert-linux-arm-prod` or `vmalert-linux-arm64-prod` binary respectively and puts it into the `bin` folder.
@@ -63,8 +63,9 @@ type Config struct {
}

// StaticConfig contains list of static targets in the following form:
// targets:
// [ - '<host>' ]
//
// targets:
// [ - '<host>' ]
type StaticConfig struct {
	Targets []string `yaml:"targets"`
}
@@ -74,9 +74,10 @@ var (

// Init returns a function for retrieving actual list of Notifier objects.
// Init works in two mods:
// * configuration via flags (for backward compatibility). Is always static
// - configuration via flags (for backward compatibility). Is always static
// and don't support live reloads.
// * configuration via file. Supports live reloads and service discovery.
// - configuration via file. Supports live reloads and service discovery.
//
// Init returns an error if both mods are used.
func Init(gen AlertURLGenerator, extLabels map[string]string, extURL string) (func() []Notifier, error) {
	if externalLabels != nil || externalURL != "" {
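The two configuration modes described in this comment correspond to vmalert's notifier flags. A hedged sketch of how they are selected on the command line (rule and config file paths are placeholders):

```console
# static mode: the notifier set is fixed at startup via flags
./vmalert-prod -rule=rules.yml -datasource.url=http://localhost:8428 -notifier.url=http://alertmanager:9093

# file mode: notifiers come from a config file with live reload and service discovery
./vmalert-prod -rule=rules.yml -datasource.url=http://localhost:8428 -notifier.config=notifier.yml
```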
@@ -37,7 +37,7 @@ func initLinks() {
		{"/-/reload", "reload configuration"},
	}
	navItems = []tpl.NavItem{
		{Name: "vmalert", Url: "home"},
		{Name: "vmalert", Url: "."},
		{Name: "Groups", Url: "groups"},
		{Name: "Alerts", Url: "alerts"},
		{Name: "Notifiers", Url: "notifiers"},

@@ -67,8 +67,9 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
	}

	switch r.URL.Path {
	case "/", "/vmalert", "/vmalert/home":
	case "/", "/vmalert", "/vmalert/":
		if r.Method != "GET" {
			httpserver.Errorf(w, r, "path %q supports only GET method", r.URL.Path)
			return false
		}
		WriteWelcome(w, r)

@@ -146,6 +147,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
	// TODO: to remove in next versions

	if !strings.HasSuffix(r.URL.Path, "/status") {
		httpserver.Errorf(w, r, "unsupported path requested: %q ", r.URL.Path)
		return false
	}
	alert, err := rh.alertByPath(strings.TrimPrefix(r.URL.Path, "/api/v1/"))
@@ -52,7 +52,6 @@ func TestHandler(t *testing.T) {
	t.Run("/", func(t *testing.T) {
		getResp(ts.URL, nil, 200)
		getResp(ts.URL+"/vmalert", nil, 200)
		getResp(ts.URL+"/vmalert/home", nil, 200)
	})

	t.Run("/api/v1/alerts", func(t *testing.T) {
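After this change the vmalert welcome page is served from `/vmalert/` rather than `/vmalert/home`. A quick manual check against a locally running vmalert might look like the following (this assumes vmalert's default `-httpListenAddr` of `:8880`; adjust the port if it differs):

```console
curl -s http://localhost:8880/vmalert/   # new canonical UI path
curl -s http://localhost:8880/vmalert    # still served
```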
@@ -12,20 +12,20 @@ vmauth-prod:
vmauth-pure-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-pure

vmauth-amd64-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-amd64
vmauth-linux-amd64-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-linux-amd64

vmauth-arm-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-arm
vmauth-linux-arm-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-linux-arm

vmauth-arm64-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-arm64
vmauth-linux-arm64-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-linux-arm64

vmauth-ppc64le-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-ppc64le
vmauth-linux-ppc64le-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-linux-ppc64le

vmauth-386-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-386
vmauth-linux-386-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-linux-386

vmauth-darwin-amd64-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-darwin-amd64

@@ -33,6 +33,12 @@ vmauth-darwin-amd64-prod:
vmauth-darwin-arm64-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-darwin-arm64

vmauth-freebsd-amd64-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-freebsd-amd64

vmauth-openbsd-amd64-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-openbsd-amd64

vmauth-windows-amd64-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-windows-amd64

@@ -66,26 +72,35 @@ run-vmauth:
	ARGS='-auth.config=app/vmauth/example_config.yml' \
	$(MAKE) run-via-docker

vmauth-amd64:
	CGO_ENABLED=1 GOARCH=amd64 $(MAKE) vmauth-local-with-goarch
vmauth-linux-amd64:
	APP_NAME=vmauth CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmauth-arm:
	CGO_ENABLED=0 GOARCH=arm $(MAKE) vmauth-local-with-goarch
vmauth-linux-arm:
	APP_NAME=vmauth CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch

vmauth-arm64:
	CGO_ENABLED=0 GOARCH=arm64 $(MAKE) vmauth-local-with-goarch
vmauth-linux-arm64:
	APP_NAME=vmauth CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch

vmauth-ppc64le:
	CGO_ENABLED=0 GOARCH=ppc64le $(MAKE) vmauth-local-with-goarch
vmauth-linux-ppc64le:
	APP_NAME=vmauth CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch

vmauth-386:
	CGO_ENABLED=0 GOARCH=386 $(MAKE) vmauth-local-with-goarch
vmauth-linux-386:
	APP_NAME=vmauth CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

vmauth-local-with-goarch:
	APP_NAME=vmauth $(MAKE) app-local-with-goarch
vmauth-darwin-amd64:
	APP_NAME=vmauth CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmauth-darwin-arm64:
	APP_NAME=vmauth CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch

vmauth-freebsd-amd64:
	APP_NAME=vmauth CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmauth-openbsd-amd64:
	APP_NAME=vmauth CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmauth-windows-amd64:
	GOARCH=amd64 APP_NAME=vmauth $(MAKE) app-local-windows-goarch

vmauth-pure:
	APP_NAME=vmauth $(MAKE) app-local-pure

vmauth-windows-amd64:
	GOARCH=amd64 APP_NAME=vmauth $(MAKE) app-local-windows-with-goarch
@@ -12,20 +12,20 @@ vmbackup-prod:
vmbackup-pure-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-pure

vmbackup-amd64-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-amd64
vmbackup-linux-amd64-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-linux-amd64

vmbackup-arm-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-arm
vmbackup-linux-arm-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-linux-arm

vmbackup-arm64-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-arm64
vmbackup-linux-arm64-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-linux-arm64

vmbackup-ppc64le-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-ppc64le
vmbackup-linux-ppc64le-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-linux-ppc64le

vmbackup-386-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-386
vmbackup-linux-386-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-linux-386

vmbackup-darwin-amd64-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-darwin-amd64

@@ -33,6 +33,12 @@ vmbackup-darwin-amd64-prod:
vmbackup-darwin-arm64-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-darwin-arm64

vmbackup-freebsd-amd64-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-freebsd-amd64

vmbackup-openbsd-amd64-prod:
	APP_NAME=vmbackup $(MAKE) app-via-docker-openbsd-amd64

package-vmbackup:
	APP_NAME=vmbackup $(MAKE) package-via-docker

@@ -57,23 +63,32 @@ package-vmbackup-386:
publish-vmbackup:
	APP_NAME=vmbackup $(MAKE) publish-via-docker

vmbackup-amd64:
	CGO_ENABLED=1 GOARCH=amd64 $(MAKE) vmbackup-local-with-goarch
vmbackup-linux-amd64:
	APP_NAME=vmbackup CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmbackup-arm:
	CGO_ENABLED=0 GOARCH=arm $(MAKE) vmbackup-local-with-goarch
vmbackup-linux-arm:
	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch

vmbackup-arm64:
	CGO_ENABLED=0 GOARCH=arm64 $(MAKE) vmbackup-local-with-goarch
vmbackup-linux-arm64:
	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch

vmbackup-ppc64le:
	CGO_ENABLED=0 GOARCH=ppc64le $(MAKE) vmbackup-local-with-goarch
vmbackup-linux-ppc64le:
	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch

vmbackup-386:
	CGO_ENABLED=0 GOARCH=386 $(MAKE) vmbackup-local-with-goarch
vmbackup-linux-386:
	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

vmbackup-local-with-goarch:
	APP_NAME=vmbackup $(MAKE) app-local-with-goarch
vmbackup-darwin-amd64:
	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmbackup-darwin-arm64:
	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch

vmbackup-freebsd-amd64:
	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmbackup-openbsd-amd64:
	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmbackup-pure:
	APP_NAME=vmbackup $(MAKE) app-local-pure
@@ -12,20 +12,20 @@ vmctl-prod:
vmctl-pure-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-pure

vmctl-amd64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-amd64
vmctl-linux-amd64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-linux-amd64

vmctl-arm-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-arm
vmctl-linux-arm-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-linux-arm

vmctl-arm64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-arm64
vmctl-linux-arm64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-linux-arm64

vmctl-ppc64le-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-ppc64le
vmctl-linux-ppc64le-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-linux-ppc64le

vmctl-386-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-386
vmctl-linux-386-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-linux-386

vmctl-darwin-amd64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-darwin-amd64

@@ -33,6 +33,12 @@ vmctl-darwin-amd64-prod:
vmctl-darwin-arm64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-darwin-arm64

vmctl-freebsd-amd64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-freebsd-amd64

vmctl-openbsd-amd64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-openbsd-amd64

vmctl-windows-amd64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-windows-amd64

@@ -60,27 +66,35 @@ package-vmctl-386:
publish-vmctl:
	APP_NAME=vmctl $(MAKE) publish-via-docker

vmctl-amd64:
	CGO_ENABLED=1 GOARCH=amd64 $(MAKE) vmctl-local-with-goarch
vmctl-linux-amd64:
	APP_NAME=vmctl CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmctl-arm:
	CGO_ENABLED=0 GOARCH=arm $(MAKE) vmctl-local-with-goarch
vmctl-linux-arm:
	APP_NAME=vmctl CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch

vmctl-arm64:
	CGO_ENABLED=0 GOARCH=arm64 $(MAKE) vmctl-local-with-goarch
vmctl-linux-arm64:
	APP_NAME=vmctl CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch

vmctl-ppc64le:
	CGO_ENABLED=0 GOARCH=ppc64le $(MAKE) vmctl-local-with-goarch
vmctl-linux-ppc64le:
	APP_NAME=vmctl CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch

vmctl-386:
	CGO_ENABLED=0 GOARCH=386 $(MAKE) vmctl-local-with-goarch
vmctl-linux-386:
	APP_NAME=vmctl CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

vmctl-local-with-goarch:
	APP_NAME=vmctl $(MAKE) app-local-with-goarch
vmctl-darwin-amd64:
	APP_NAME=vmctl CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmctl-darwin-arm64:
	APP_NAME=vmctl CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch

vmctl-freebsd-amd64:
	APP_NAME=vmctl CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmctl-openbsd-amd64:
	APP_NAME=vmctl CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

vmctl-windows-amd64:
	GOARCH=amd64 APP_NAME=vmctl $(MAKE) app-local-windows-goarch

vmctl-pure:
	APP_NAME=vmctl $(MAKE) app-local-pure

vmctl-windows-amd64:
	GOARCH=amd64 APP_NAME=vmctl $(MAKE) app-local-windows-with-goarch
@ -16,7 +16,7 @@ To see the full list of supported modes
|
|||
run the following command:
|
||||
|
||||
```console
|
||||
$ ./vmctl --help
|
||||
$ ./vmctl --help
|
||||
NAME:
|
||||
vmctl - VictoriaMetrics command-line tool
|
||||
|
||||
|
@ -40,9 +40,9 @@ OPTIONS:
|
|||
--influx-addr value InfluxDB server addr (default: "http://localhost:8086")
|
||||
--influx-user value InfluxDB user [$INFLUX_USERNAME]
|
||||
...
|
||||
--vm-addr vmctl VictoriaMetrics address to perform import requests.
|
||||
Should be the same as --httpListenAddr value for single-node version or vminsert component.
|
||||
When importing into the clustered version do not forget to set additionally --vm-account-id flag.
|
||||
--vm-addr vmctl VictoriaMetrics address to perform import requests.
|
||||
Should be the same as --httpListenAddr value for single-node version or vminsert component.
|
||||
When importing into the clustered version do not forget to set additionally --vm-account-id flag.
|
||||
Please note, that vmctl performs initial readiness check for the given address by checking `/health` endpoint. (default: "http://localhost:8428")
|
||||
--vm-user value VictoriaMetrics username for basic auth [$VM_USERNAME]
|
||||
--vm-password value VictoriaMetrics password for basic auth [$VM_PASSWORD]
|
||||
|
@ -107,7 +107,7 @@ $ ./vmctl opentsdb --otsdb-addr http://opentsdb:4242/ --otsdb-retentions sum-1m-
|
|||
OpenTSDB import mode
|
||||
2021/04/09 11:52:50 Will collect data starting at TS 1617990770
|
||||
2021/04/09 11:52:50 Loading all metrics from OpenTSDB for filters: [system]
|
||||
Found 9 metrics to import. Continue? [Y/n]
|
||||
Found 9 metrics to import. Continue? [Y/n]
|
||||
2021/04/09 11:52:51 Starting work on system.load1
|
||||
23 / 402200 [>____________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________] 0.01% 2 p/s
|
||||
```
|
||||
|
@ -280,7 +280,7 @@ InfluxDB import mode
|
|||
2020/01/26 14:23:29 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
|
||||
2020/01/26 14:23:29 found 12 fields
|
||||
2020/01/26 14:23:29 fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"
|
||||
Found 10 timeseries to import. Continue? [Y/n]
|
||||
Found 10 timeseries to import. Continue? [Y/n]
|
||||
```
|
||||
|
||||
The timeseries select query would be following:
|
||||
|
@ -499,10 +499,10 @@ processed and can't show the progress bar. It will show the current processing s
|
|||
--vm-native-filter-match='{job="vmagent"}' \
|
||||
--vm-native-filter-time-start='2020-01-01T20:07:00Z'
|
||||
VictoriaMetrics Native import mode
|
||||
Initing export pipe from "http://localhost:8528" with filters:
|
||||
Initing export pipe from "http://localhost:8528" with filters:
|
||||
filter: match[]={job="vmagent"}
|
||||
Initing import process to "http://localhost:8428":
|
||||
Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
|
||||
Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
|
||||
2020/10/13 17:04:59 Total time: 952.143376ms
|
||||
```
|
||||
|
||||
|
@ -524,7 +524,7 @@ and specify `accountID` param.
|
|||
|
||||
## Verifying exported blocks from VictoriaMetrics
|
||||
|
||||
In this mode, `vmctl` allows verifying correctness and integrity of data exported via [native format](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-export-data-in-native-format) from VictoriaMetrics.
|
||||
In this mode, `vmctl` allows verifying correctness and integrity of data exported via [native format](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-export-data-in-native-format) from VictoriaMetrics.
|
||||
You can verify exported data at disk before uploading it by `vmctl verify-block` command:
|
||||
|
||||
```console
|
||||
|
@ -661,11 +661,11 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
|||
#### Development ARM build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
|
||||
2. Run `make vmctl-arm` or `make vmctl-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmctl-arm` or `vmctl-arm64` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make vmctl-linux-arm` or `make vmctl-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmctl-linux-arm` or `vmctl-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
#### Production ARM build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make vmctl-arm-prod` or `make vmctl-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmctl-arm-prod` or `vmctl-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make vmctl-linux-arm-prod` or `make vmctl-linux-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmctl-linux-arm-prod` or `vmctl-linux-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
|
|
|
@ -54,7 +54,7 @@ func (cw *cWriter) printf(format string, args ...interface{}) {
	cw.err = err
}

//"{"metric":{"__name__":"cpu_usage_guest","arch":"x64","hostname":"host_19",},"timestamps":[1567296000000,1567296010000],"values":[1567296000000,66]}
// "{"metric":{"__name__":"cpu_usage_guest","arch":"x64","hostname":"host_19",},"timestamps":[1567296000000,1567296010000],"values":[1567296000000,66]}
func (ts *TimeSeries) write(w io.Writer) (int, error) {
	timestamps := ts.Timestamps
	values := ts.Values
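The comment above documents the JSON line format that vmctl produces for VictoriaMetrics' import API. As a rough, self-contained illustration (not part of this diff), the same format can be built with the standard library; the sample labels, values and the target address below are made up:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// importLine mirrors the JSON line format described in the comment above:
// {"metric":{...labels...},"timestamps":[...],"values":[...]}
type importLine struct {
	Metric     map[string]string `json:"metric"`
	Values     []float64         `json:"values"`
	Timestamps []int64           `json:"timestamps"`
}

func main() {
	line := importLine{
		Metric:     map[string]string{"__name__": "cpu_usage_guest", "hostname": "host_19"},
		Values:     []float64{12, 66},
		Timestamps: []int64{1567296000000, 1567296010000}, // milliseconds
	}
	body, _ := json.Marshal(line)
	// POST a single JSON line to the import endpoint (the address is an assumption).
	resp, err := http.Post("http://localhost:8428/api/v1/import", "application/json", bytes.NewReader(append(body, '\n')))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("import status:", resp.Status)
}
```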
|
|
|
@ -12,20 +12,20 @@ vmrestore-prod:
|
|||
vmrestore-pure-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-pure
|
||||
|
||||
vmrestore-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-amd64
|
||||
vmrestore-linux-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-amd64
|
||||
|
||||
vmrestore-arm-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-arm
|
||||
vmrestore-linux-arm-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-arm
|
||||
|
||||
vmrestore-arm64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-arm64
|
||||
vmrestore-linux-arm64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-arm64
|
||||
|
||||
vmrestore-ppc64le-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-ppc64le
|
||||
vmrestore-linux-ppc64le-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-ppc64le
|
||||
|
||||
vmrestore-386-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-386
|
||||
vmrestore-linux-386-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-386
|
||||
|
||||
vmrestore-darwin-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-darwin-amd64
|
||||
|
@ -33,6 +33,12 @@ vmrestore-darwin-amd64-prod:
|
|||
vmrestore-darwin-arm64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-darwin-arm64
|
||||
|
||||
vmrestore-freebsd-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-freebsd-amd64
|
||||
|
||||
vmrestore-openbsd-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-openbsd-amd64
|
||||
|
||||
package-vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker
|
||||
|
||||
|
@ -57,23 +63,32 @@ package-vmrestore-386:
|
|||
publish-vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) publish-via-docker
|
||||
|
||||
vmrestore-amd64:
|
||||
CGO_ENABLED=1 GOARCH=amd64 $(MAKE) vmrestore-local-with-goarch
|
||||
vmrestore-linux-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-arm:
|
||||
CGO_ENABLED=0 GOARCH=arm $(MAKE) vmrestore-local-with-goarch
|
||||
vmrestore-linux-arm:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-arm64:
|
||||
CGO_ENABLED=0 GOARCH=arm64 $(MAKE) vmrestore-local-with-goarch
|
||||
vmrestore-linux-arm64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-ppc64le:
|
||||
CGO_ENABLED=0 GOARCH=ppc64le $(MAKE) vmrestore-local-with-goarch
|
||||
vmrestore-linux-ppc64le:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-386:
|
||||
CGO_ENABLED=0 GOARCH=386 $(MAKE) vmrestore-local-with-goarch
|
||||
vmrestore-linux-386:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-local-with-goarch:
|
||||
APP_NAME=vmrestore $(MAKE) app-local-with-goarch
|
||||
vmrestore-darwin-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-darwin-arm64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-freebsd-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-openbsd-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-pure:
|
||||
APP_NAME=vmrestore $(MAKE) app-local-pure
|
||||
|
|
|
@ -161,18 +161,19 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
		path = path[len("/graphite"):]
	}
	// vmui access.
	if strings.HasPrefix(path, "/vmui") {
		r.URL.Path = path
		vmuiFileServer.ServeHTTP(w, r)
		return true
	}
	if path == "/graph" {
		// Redirect to /graph/, otherwise vmui redirects to /vmui/, which can be inaccessible in user env.
	if path == "/vmui" || path == "/graph" {
		// VMUI access via incomplete url without `/` in the end. Redirect to complete url.
		// Use relative redirect, since the hostname and path prefix may be incorrect if VictoriaMetrics
		// is hidden behind vmauth or similar proxy.
		_ = r.ParseForm()
		newURL := "graph/?" + r.Form.Encode()
		http.Redirect(w, r, newURL, http.StatusFound)
		path = strings.TrimPrefix(path, "/")
		newURL := path + "/?" + r.Form.Encode()
		http.Redirect(w, r, newURL, http.StatusMovedPermanently)
		return true
	}
	if strings.HasPrefix(path, "/vmui/") {
		r.URL.Path = path
		vmuiFileServer.ServeHTTP(w, r)
		return true
	}
	if strings.HasPrefix(path, "/graph/") {

@ -212,7 +213,14 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
		return true
	}

	if strings.HasPrefix(path, "/vmalert") {
	if path == "/vmalert" {
		// vmalert access via incomplete url without `/` in the end. Redirect to complete url.
		// Use relative redirect, since the hostname and path prefix may be incorrect if VictoriaMetrics
		// is hidden behind vmauth or similar proxy.
		http.Redirect(w, r, "vmalert/", http.StatusMovedPermanently)
		return true
	}
	if strings.HasPrefix(path, "/vmalert/") {
		vmalertRequests.Inc()
		if len(*vmalertProxyURL) == 0 {
			w.WriteHeader(http.StatusBadRequest)
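Note that the redirects in this hunk are deliberately relative (`"vmalert/"`, `path + "/?"`) rather than absolute, so they keep working when VictoriaMetrics sits behind a path-rewriting proxy. A minimal hedged sketch of the same technique outside VictoriaMetrics — the route name and port are illustrative only:

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	// Requests to "/ui" (no trailing slash) are redirected to the relative URL "ui/",
	// which the browser resolves against the current location. The redirect therefore
	// stays correct when a reverse proxy adds its own prefix (e.g. /app/ui -> /app/ui/).
	http.HandleFunc("/ui", func(w http.ResponseWriter, r *http.Request) {
		_ = r.ParseForm()
		http.Redirect(w, r, "ui/?"+r.Form.Encode(), http.StatusMovedPermanently)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```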
@ -547,14 +547,13 @@ func mergeSortBlocks(dst *Result, sbh sortBlocksHeap, dedupInterval int64) {
	heap.Init(&sbh)
	for {
		top := sbh[0]
		heap.Pop(&sbh)
		if len(sbh) == 0 {
		if len(sbh) == 1 {
			dst.Timestamps = append(dst.Timestamps, top.Timestamps[top.NextIdx:]...)
			dst.Values = append(dst.Values, top.Values[top.NextIdx:]...)
			putSortBlock(top)
			break
		}
		sbNext := sbh[0]
		sbNext := sbh.getNextBlock()
		tsNext := sbNext.Timestamps[sbNext.NextIdx]
		topTimestamps := top.Timestamps
		topNextIdx := top.NextIdx

@ -568,8 +567,9 @@ func mergeSortBlocks(dst *Result, sbh sortBlocksHeap, dedupInterval int64) {
			dst.Values = append(dst.Values, top.Values[topNextIdx:top.NextIdx]...)
		}
		if top.NextIdx < len(topTimestamps) {
			heap.Push(&sbh, top)
			heap.Fix(&sbh, 0)
		} else {
			heap.Pop(&sbh)
			putSortBlock(top)
		}
	}

@ -637,6 +637,21 @@ func (sb *sortBlock) unpackFrom(tmpBlock *storage.Block, tbf *tmpBlocksFile, br

type sortBlocksHeap []*sortBlock

func (sbh sortBlocksHeap) getNextBlock() *sortBlock {
	if len(sbh) < 2 {
		return nil
	}
	if len(sbh) < 3 {
		return sbh[1]
	}
	a := sbh[1]
	b := sbh[2]
	if a.Timestamps[a.NextIdx] <= b.Timestamps[b.NextIdx] {
		return a
	}
	return b
}

func (sbh sortBlocksHeap) Len() int {
	return len(sbh)
}
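The new `getNextBlock` helper relies on the standard binary-heap layout used by `container/heap`: the root's children sit at indexes 1 and 2, so the second-smallest element is always one of them. A small self-contained sketch of that property, using a plain int heap rather than the real `sortBlock` type:

```go
package main

import (
	"container/heap"
	"fmt"
)

// intHeap is a minimal min-heap of ints.
type intHeap []int

func (h intHeap) Len() int            { return len(h) }
func (h intHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *intHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// secondSmallest peeks at the root's children (indexes 1 and 2) without popping,
// mirroring what getNextBlock does for sortBlocksHeap.
func secondSmallest(h intHeap) int {
	if len(h) < 3 {
		return h[1]
	}
	if h[1] <= h[2] {
		return h[1]
	}
	return h[2]
}

func main() {
	h := intHeap{5, 1, 9, 3, 7}
	heap.Init(&h)
	fmt.Println("smallest:", h[0], "next:", secondSmallest(h)) // smallest: 1 next: 3
}
```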
@ -27,7 +27,7 @@ func BenchmarkMergeSortBlocks(b *testing.B) {
		benchmarkMergeSortBlocks(b, blocks)
	})
	}
	b.Run("overlapped-blocks", func(b *testing.B) {
	b.Run("overlapped-blocks-bestcase", func(b *testing.B) {
		const samplesPerBlock = 8192
		var blocks []*sortBlock
		for j := 0; j < 10; j++ {

@ -51,6 +51,33 @@ func BenchmarkMergeSortBlocks(b *testing.B) {
		}
		benchmarkMergeSortBlocks(b, blocks)
	})
	b.Run("overlapped-blocks-worstcase", func(b *testing.B) {
		const samplesPerBlock = 8192
		var blocks []*sortBlock
		for j := 0; j < 5; j++ {
			timestamps := make([]int64, samplesPerBlock)
			values := make([]float64, samplesPerBlock)
			for i := range timestamps {
				timestamps[i] = int64(2 * (j*samplesPerBlock + i))
				values[i] = float64(2 * (j*samplesPerBlock + i))
			}
			blocks = append(blocks, &sortBlock{
				Timestamps: timestamps,
				Values:     values,
			})
			timestamps = make([]int64, samplesPerBlock)
			values = make([]float64, samplesPerBlock)
			for i := range timestamps {
				timestamps[i] = int64(2*(j*samplesPerBlock+i) + 1)
				values[i] = float64(2*(j*samplesPerBlock+i) + 1)
			}
			blocks = append(blocks, &sortBlock{
				Timestamps: timestamps,
				Values:     values,
			})
		}
		benchmarkMergeSortBlocks(b, blocks)
	})
}

func benchmarkMergeSortBlocks(b *testing.B, blocks []*sortBlock) {
@ -609,6 +609,10 @@ func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseW
	if cp.start == 0 {
		cp.start = cp.end - defaultStep
	}
	limit, err := searchutils.GetInt(r, "limit")
	if err != nil {
		return err
	}
	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxSeriesLimit)
	metricNames, err := netstorage.SearchMetricNames(qt, sq, cp.deadline)
	if err != nil {

@ -617,6 +621,9 @@ func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseW
	w.Header().Set("Content-Type", "application/json")
	bw := bufferedwriter.Get(w)
	defer bufferedwriter.Put(bw)
	if limit > 0 && limit < len(metricNames) {
		metricNames = metricNames[:limit]
	}
	qtDone := func() {
		qt.Donef("start=%d, end=%d", cp.start, cp.end)
	}

@ -1036,7 +1043,6 @@ func (cp *commonParams) IsDefaultTimeRange() bool {
// - match[]
// - extra_label
// - extra_filters[]
//
func getExportParams(r *http.Request, startTime time.Time) (*commonParams, error) {
	cp, err := getCommonParams(r, startTime, true)
	if err != nil {

@ -1054,7 +1060,6 @@ func getExportParams(r *http.Request, startTime time.Time) (*commonParams, error
// - match[]
// - extra_label
// - extra_filters[]
//
func getCommonParams(r *http.Request, startTime time.Time, requireNonEmptyMatch bool) (*commonParams, error) {
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	start, err := searchutils.GetTime(r, "start", 0)
@ -292,14 +292,14 @@ func evalExprInternal(qt *querytracer.Tracer, ec *EvalConfig, e metricsql.Expr)
}

func evalTransformFunc(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.FuncExpr) ([]*timeseries, error) {
	args, err := evalExprs(qt, ec, fe.Args)
	if err != nil {
		return nil, err
	}
	tf := getTransformFunc(fe.Name)
	if tf == nil {
		return nil, fmt.Errorf(`unknown func %q`, fe.Name)
	}
	args, err := evalExprs(qt, ec, fe.Args)
	if err != nil {
		return nil, err
	}
	tfa := &transformFuncArg{
		ec: ec,
		fe: fe,
@ -520,7 +520,7 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
	// Extend dstValues in order to remove mallocs below.
	dstValues = decimal.ExtendFloat64sCapacity(dstValues, len(rc.Timestamps))

	scrapeInterval := getScrapeInterval(timestamps)
	scrapeInterval := getScrapeInterval(timestamps, rc.Step)
	maxPrevInterval := getMaxPrevInterval(scrapeInterval)
	if rc.LookbackDelta > 0 && maxPrevInterval > rc.LookbackDelta {
		maxPrevInterval = rc.LookbackDelta

@ -644,9 +644,11 @@ func binarySearchInt64(a []int64, v int64) uint {
	return i
}

func getScrapeInterval(timestamps []int64) int64 {
func getScrapeInterval(timestamps []int64, defaultInterval int64) int64 {
	if len(timestamps) < 2 {
		return int64(maxSilenceInterval)
		// can't calculate scrape interval with less than 2 timestamps
		// return defaultInterval
		return defaultInterval
	}

	// Estimate scrape interval as 0.6 quantile for the first 20 intervals.

@ -665,7 +667,7 @@ func getScrapeInterval(timestamps []int64) int64 {
	a.A = intervals
	putFloat64s(a)
	if scrapeInterval <= 0 {
		return int64(maxSilenceInterval)
		return defaultInterval
	}
	return scrapeInterval
}
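For context, the function above estimates the scrape interval as the 0.6 quantile of the first timestamp deltas and now falls back to `defaultInterval` instead of `maxSilenceInterval`. A rough standalone sketch of that idea (not the actual VictoriaMetrics implementation, which uses its own optimized helpers):

```go
package main

import (
	"fmt"
	"sort"
)

// estimateScrapeInterval returns an approximate scrape interval (in ms) for the
// given sample timestamps, falling back to defaultInterval when there are fewer
// than 2 timestamps or the estimate is non-positive.
func estimateScrapeInterval(timestamps []int64, defaultInterval int64) int64 {
	if len(timestamps) < 2 {
		return defaultInterval
	}
	// Collect deltas between the first up-to-20 adjacent timestamps.
	n := len(timestamps)
	if n > 21 {
		n = 21
	}
	intervals := make([]int64, 0, n-1)
	for i := 1; i < n; i++ {
		intervals = append(intervals, timestamps[i]-timestamps[i-1])
	}
	sort.Slice(intervals, func(i, j int) bool { return intervals[i] < intervals[j] })
	// Take the 0.6 quantile so occasional gaps don't skew the estimate too much.
	q := intervals[int(0.6*float64(len(intervals)-1))]
	if q <= 0 {
		return defaultInterval
	}
	return q
}

func main() {
	ts := []int64{0, 10_000, 20_000, 30_500, 40_000}
	fmt.Println(estimateScrapeInterval(ts, 15_000))  // close to 10000
	fmt.Println(estimateScrapeInterval(nil, 15_000)) // falls back to 15000
}
```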
@ -219,10 +219,13 @@ func (d *Deadline) String() string {
//
// Label filters can be present in extra_label and extra_filters[] query args.
// They are combined. For example, the following query args:
// extra_label=t1=v1&extra_label=t2=v2&extra_filters[]={env="prod",team="devops"}&extra_filters={env=~"dev|staging",team!="devops"}
//
//	extra_label=t1=v1&extra_label=t2=v2&extra_filters[]={env="prod",team="devops"}&extra_filters={env=~"dev|staging",team!="devops"}
//
// should be translated to the following filters joined with "or":
// {env="prod",team="devops",t1="v1",t2="v2"}
// {env=~"dev|staging",team!="devops",t1="v1",t2="v2"}
//
//	{env="prod",team="devops",t1="v1",t2="v2"}
//	{env=~"dev|staging",team!="devops",t1="v1",t2="v2"}
func GetExtraTagFilters(r *http.Request) ([][]storage.TagFilter, error) {
	var tagFilters []storage.TagFilter
	for _, match := range r.Form["extra_label"] {
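The doc comment above describes the combination rule: every `extra_label` pair is appended to each `extra_filters[]` set, and the resulting sets are joined with "or". A hedged, simplified sketch of that rule using plain maps instead of the real `storage.TagFilter` type:

```go
package main

import "fmt"

// combineFilters appends the common extraLabels to every filter set,
// producing the list of filter sets that are later joined with "or".
func combineFilters(extraLabels map[string]string, filterSets []map[string]string) []map[string]string {
	if len(filterSets) == 0 {
		// No extra_filters[] - the extra_label pairs form a single filter set.
		filterSets = []map[string]string{{}}
	}
	out := make([]map[string]string, 0, len(filterSets))
	for _, fs := range filterSets {
		combined := map[string]string{}
		for k, v := range fs {
			combined[k] = v
		}
		for k, v := range extraLabels {
			combined[k] = v
		}
		out = append(out, combined)
	}
	return out
}

func main() {
	extraLabels := map[string]string{"t1": "v1", "t2": "v2"}
	filterSets := []map[string]string{
		{"env": "prod", "team": "devops"},
		{"env": "dev", "team": "qa"},
	}
	for _, fs := range combineFilters(extraLabels, filterSets) {
		fmt.Println(fs) // each printed set is OR-ed with the others at query time
	}
}
```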
@ -850,6 +850,10 @@ func registerStorageMetrics(strg *storage.Storage) {
	metrics.NewGauge(`vm_cache_collisions_total{type="storage/metricName"}`, func() float64 {
		return float64(m().MetricNameCacheCollisions)
	})

	metrics.NewGauge(`vm_next_retention_seconds`, func() float64 {
		return float64(m().NextRetentionSeconds)
	})
}

func jsonResponseError(w http.ResponseWriter, err error) {
@ -3,8 +3,8 @@ COPY build /build

WORKDIR /build
COPY web/ /build/
RUN GOOS=linux GOARCH=amd64 GO111MODULE=on CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
    GOOS=windows GOARCH=amd64 GO111MODULE=on CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
    GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/

FROM alpine:3.16.0
USER root
@ -8,8 +8,9 @@ import (
)

// specific files
//go:embed favicon-32x32.png robots.txt index.html manifest.json asset-manifest.json
// static content
//
//go:embed favicon-32x32.png robots.txt index.html manifest.json asset-manifest.json
//go:embed static
var files embed.FS
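For reference, an `embed.FS` populated via `//go:embed` directives like the ones above is typically exposed over HTTP roughly as follows. This is a standalone sketch (it assumes a local `static/` directory exists at build time), not the actual vmui wiring:

```go
package main

import (
	"embed"
	"io/fs"
	"log"
	"net/http"
)

// The embedded directory name is a placeholder for this example.
//
//go:embed static
var files embed.FS

func main() {
	sub, err := fs.Sub(files, "static")
	if err != nil {
		log.Fatal(err)
	}
	// Serve the embedded files under /vmui/.
	http.Handle("/vmui/", http.StripPrefix("/vmui/", http.FileServer(http.FS(sub))))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```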
|
|
|
@ -31,7 +31,6 @@ app-via-docker: package-builder
|
|||
-w /VictoriaMetrics \
|
||||
--mount type=bind,src="$(shell pwd)/gocache-for-docker",dst=/gocache \
|
||||
--env GOCACHE=/gocache \
|
||||
--env GO111MODULE=on \
|
||||
$(DOCKER_OPTS) \
|
||||
$(BUILDER_IMAGE) \
|
||||
go build $(RACE) -mod=vendor -trimpath -buildvcs=false \
|
||||
|
@ -47,7 +46,6 @@ app-via-docker-windows: package-builder
|
|||
-w /VictoriaMetrics \
|
||||
--mount type=bind,src="$(shell pwd)/gocache-for-docker",dst=/gocache \
|
||||
--env GOCACHE=/gocache \
|
||||
--env GO111MODULE=on \
|
||||
$(DOCKER_OPTS) \
|
||||
$(BUILDER_IMAGE) \
|
||||
go build $(RACE) -mod=vendor -trimpath -buildvcs=false \
|
||||
|
@ -65,11 +63,11 @@ package-via-docker: package-base
|
|||
-f app/$(APP_NAME)/deployment/Dockerfile bin)
|
||||
|
||||
publish-via-docker: \
|
||||
app-via-docker-amd64 \
|
||||
app-via-docker-arm \
|
||||
app-via-docker-arm64 \
|
||||
app-via-docker-ppc64le \
|
||||
app-via-docker-386
|
||||
app-via-docker-linux-amd64 \
|
||||
app-via-docker-linux-arm \
|
||||
app-via-docker-linux-arm64 \
|
||||
app-via-docker-linux-ppc64le \
|
||||
app-via-docker-linux-386
|
||||
docker buildx build \
|
||||
--platform=linux/amd64,linux/arm,linux/arm64,linux/ppc64le,linux/386 \
|
||||
--build-arg certs_image=$(CERTS_IMAGE) \
|
||||
|
@ -89,62 +87,51 @@ run-via-docker: package-via-docker
|
|||
$(DOCKER_OPTS) \
|
||||
$(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(APP_SUFFIX)$(RACE) $(ARGS)
|
||||
|
||||
app-via-docker-goarch:
|
||||
APP_SUFFIX='-$(GOARCH)' \
|
||||
DOCKER_OPTS='--env CGO_ENABLED=$(CGO_ENABLED) --env GOOS=linux --env GOARCH=$(GOARCH)' \
|
||||
app-via-docker-goos-goarch:
|
||||
APP_SUFFIX='-$(GOOS)-$(GOARCH)' \
|
||||
DOCKER_OPTS='--env CGO_ENABLED=$(CGO_ENABLED) --env GOOS=$(GOOS) --env GOARCH=$(GOARCH)' \
|
||||
$(MAKE) app-via-docker
|
||||
|
||||
app-via-docker-pure:
|
||||
APP_SUFFIX='-pure' DOCKER_OPTS='--env CGO_ENABLED=0' $(MAKE) app-via-docker
|
||||
|
||||
app-via-docker-linux-amd64:
|
||||
CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-via-docker-goos-goarch
|
||||
|
||||
app-via-docker-linux-arm:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-via-docker-goos-goarch
|
||||
|
||||
app-via-docker-linux-arm64:
|
||||
ifeq ($(APP_NAME),vmagent)
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-via-docker-goos-goarch
|
||||
else
|
||||
APP_SUFFIX='-linux-arm64' \
|
||||
DOCKER_OPTS='--env CGO_ENABLED=1 --env GOOS=linux --env GOARCH=arm64 --env CC=/opt/cross-builder/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc' \
|
||||
$(MAKE) app-via-docker
|
||||
endif
|
||||
|
||||
app-via-docker-linux-ppc64le:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-via-docker-goos-goarch
|
||||
|
||||
app-via-docker-linux-386:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-via-docker-goos-goarch
|
||||
|
||||
app-via-docker-darwin-amd64:
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-via-docker-goos-goarch
|
||||
|
||||
app-via-docker-darwin-arm64:
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-via-docker-goos-goarch
|
||||
|
||||
app-via-docker-goos-goarch:
|
||||
APP_SUFFIX='-$(GOOS)-$(GOARCH)' \
|
||||
DOCKER_OPTS='--env CGO_ENABLED=$(CGO_ENABLED) --env GOOS=$(GOOS) --env GOARCH=$(GOARCH)' \
|
||||
$(MAKE) app-via-docker
|
||||
app-via-docker-freebsd-amd64:
|
||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-via-docker-goos-goarch
|
||||
|
||||
app-via-docker-goarch-arm64:
|
||||
APP_SUFFIX='-arm64' \
|
||||
DOCKER_OPTS='--env CGO_ENABLED=1 --env GOOS=linux --env GOARCH=arm64 --env CC=/opt/cross-builder/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc' \
|
||||
$(MAKE) app-via-docker
|
||||
|
||||
app-via-docker-windows-goarch:
|
||||
APP_SUFFIX='-$(GOARCH)' \
|
||||
DOCKER_OPTS='--env CGO_ENABLED=0 --env GOOS=windows --env GOARCH=$(GOARCH)' \
|
||||
$(MAKE) app-via-docker-windows
|
||||
|
||||
app-via-docker-goarch-cgo:
|
||||
CGO_ENABLED=1 $(MAKE) app-via-docker-goarch
|
||||
|
||||
app-via-docker-goarch-nocgo:
|
||||
CGO_ENABLED=0 $(MAKE) app-via-docker-goarch
|
||||
|
||||
app-via-docker-pure:
|
||||
APP_SUFFIX='-pure' DOCKER_OPTS='--env CGO_ENABLED=0' $(MAKE) app-via-docker
|
||||
|
||||
app-via-docker-amd64:
|
||||
GOARCH=amd64 $(MAKE) app-via-docker-goarch-cgo
|
||||
|
||||
app-via-docker-arm:
|
||||
GOARCH=arm $(MAKE) app-via-docker-goarch-nocgo
|
||||
|
||||
app-via-docker-arm64:
|
||||
ifeq ($(APP_NAME),vmagent)
|
||||
GOARCH=arm64 $(MAKE) app-via-docker-goarch-nocgo
|
||||
else
|
||||
$(MAKE) app-via-docker-goarch-arm64
|
||||
endif
|
||||
|
||||
app-via-docker-ppc64le:
|
||||
GOARCH=ppc64le $(MAKE) app-via-docker-goarch-nocgo
|
||||
|
||||
app-via-docker-386:
|
||||
GOARCH=386 $(MAKE) app-via-docker-goarch-nocgo
|
||||
app-via-docker-openbsd-amd64:
|
||||
CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-via-docker-goos-goarch
|
||||
|
||||
app-via-docker-windows-amd64:
|
||||
GOARCH=amd64 $(MAKE) app-via-docker-windows-goarch
|
||||
APP_SUFFIX='-$(GOARCH)' \
|
||||
DOCKER_OPTS='--env CGO_ENABLED=0 --env GOOS=windows --env GOARCH=amd64' \
|
||||
$(MAKE) app-via-docker-windows
|
||||
|
||||
package-via-docker-goarch:
|
||||
APP_SUFFIX='-$(GOARCH)' \
|
||||
|
|
|
@ -74,7 +74,7 @@ groups:
              sum(vm_data_size_bytes{type!="indexdb"}) /
              sum(vm_rows{type!="indexdb"})
            )
          ) < 3 * 24 * 3600
          ) < 3 * 24 * 3600 > 0
        for: 30m
        labels:
          severity: critical
@ -40,7 +40,7 @@ services:
    restart: always
  grafana:
    container_name: grafana
    image: grafana/grafana:8.5.1
    image: grafana/grafana:9.0.2
    depends_on:
      - "victoriametrics"
    ports:
@ -51,6 +51,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
* [Install and configure VictoriaMetrics on Debian](https://www.vultr.com/docs/install-and-configure-victoriametrics-on-debian)
* [Superset BI with Victoria Metrics](https://cer6erus.medium.com/superset-bi-with-victoria-metrics-a109d3e91bc6)
* [VictoriaMetrics Source Code Analysis - Bloom filter](https://www.sobyte.net/post/2022-05/victoriametrics-bloomfilter/)
* [How we tried using VictoriaMetrics and Thanos at the same time](https://habr.com/ru/company/sravni/blog/672908/)

## Our articles
@ -15,8 +15,9 @@ The following tip changes can be tested by building VictoriaMetrics components f

## tip

**Update note1:** this release introduces backwards-incompatible changes to `vm_partial_results_total` metric by changing its labels to be consistent with `vm_requests_total` metric. If you use alerting rules or Grafana dashboards, which rely on this metric, then they must be updated. The official dashboards for VictoriaMetrics don't use this metric.
**Update note2:** [vmalert](https://docs.victoriametrics.com/vmalert.html) adds `/vmalert/` prefix to [web urls](https://docs.victoriametrics.com/vmalert.html#web) according to [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2825). This may affect `vmalert` instances with non-empty `-http.pathPrefix` command-line flag. After the update, configuring this flag is no longer needed. Here's [why](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2799#issuecomment-1171392005).
**Update note 1:** this release introduces backwards-incompatible changes to `vm_partial_results_total` metric by changing its labels to be consistent with `vm_requests_total` metric. If you use alerting rules or Grafana dashboards, which rely on this metric, then they must be updated. The official dashboards for VictoriaMetrics don't use this metric.
**Update note 2:** [vmalert](https://docs.victoriametrics.com/vmalert.html) adds `/vmalert/` prefix to [web urls](https://docs.victoriametrics.com/vmalert.html#web) according to [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2825). This may affect `vmalert` instances with non-empty `-http.pathPrefix` command-line flag. After the update, configuring this flag is no longer needed. Here's [why](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2799#issuecomment-1171392005).
**Update note 3:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics because of added ability to query `vmselect` data from other `vmselect` nodes - see [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup), so read requests to `vmselect` will fail until the upgrade is complete. These errors will stop after all the `vmselect` and `vmstorage` nodes are updated to the new release. It is safe to downgrade to previous releases at any time.

* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): deprecate alert's status link `/api/v1/<groupID>/<alertID>/status` in favour of `api/v1/alert?group_id=<group_id>&alert_id=<alert_id>"`. The old alert's status link is still supported, but will be removed in future releases. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2825).
* FEATURE: [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): add support for querying lower-level `vmselect` nodes from upper-level `vmselect` nodes. This makes it possible to build multi-level cluster setups for global querying view and HA purposes without the need to use [Promxy](https://github.com/jacksontj/promxy). See [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2778).

@ -37,10 +38,12 @@ scrape_configs:
      - targets: ["host123:8080"]
```

* FEATURE: add ability to pass `limit` query arg to `api/v1/series` endpoint. This can be used if only a sample of up to `limit` series must be returned from the endpoint. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2841) and [these docs](https://docs.victoriametrics.com/#prometheus-querying-api-enhancements).
* FEATURE: [query tracing](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#query-tracing): show timestamps in query traces in human-readable format (aka `RFC3339` in UTC timezone) instead of milliseconds since Unix epoch. For example, `2022-06-27T10:32:54.506Z` instead of `1656325974506`. This improves traces' readability.
* FEATURE: improve performance of [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers) requests, which return a big number of time series.
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve query performance when [replication is enabled](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#replication-and-data-safety).
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly handle partial counter resets in [remove_resets](https://docs.victoriametrics.com/MetricsQL.html#remove_resets) function. Now `remove_resets(sum(m))` should return the expected increasing line when some time series matching `m` disappear on the selected time range. Previously such a query would return horizontal line after the disappeared series.
* FEATURE: expose `vm_next_retention_seconds` metric at `http://victoriametrics:8428/metrics`, which shows the number of seconds left until the next `indexdb` rotation. Thanks to @guidao for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2863).
* FEATURE: expose additional histogram metrics at `http://victoriametrics:8428/metrics`, which may help understanding query workload:

  * `vm_rows_read_per_query` - the number of raw samples read per query.

@ -48,6 +51,9 @@ scrape_configs:
  * `vm_rows_read_per_series` - the number of raw samples read per queried series.
  * `vm_series_read_per_query` - the number of series read per query.

* FEATURE: publish binaries for FreeBSD and OpenBSD at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).

* BUGFIX: consistently name binaries at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) in the form `$(APP_NAME)-$(GOOS)-$(GOARCH)-$(VERSION).tar.gz`. For example, `victoria-metrics-linux-amd64-v1.79.0.tar.gz`. Previously the `$(GOOS)` part was missing in binaries for Linux.
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow using `__name__` label (aka [metric name](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)) in alerting annotations. For example:

{% raw %}

@ -83,7 +89,7 @@ Released at 20-06-2022

**Warning (03-07-2022):** VictoriaMetrics v1.78.0 contains a bug, which may result in missing time series during queries. It is recommended upgrading to [v1.78.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.78.1), which fixes the bug.

**Update notes:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics because of added [query tracing](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#query-tracing), so `vmselect` and `vmstorage` nodes will experience communication errors and read requests to `vmselect` will fail until the upgrade is complete. These errors will stop after all the `vmselect` and `vmstorage` nodes are updated to the new release. It is safe to downgrade to previous releases.
**Update notes:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics because of added [query tracing](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#query-tracing), so read requests to `vmselect` will fail until the upgrade is complete. These errors will stop after all the `vmselect` and `vmstorage` nodes are updated to the new release. It is safe to downgrade to previous releases.

* SECURITY: add `-flagsAuthKey` command-line flag for protecting `/flags` endpoint from unauthorized access. Though this endpoint already hides values for command-line flags with `key` and `password` substrings in their names, other sensitive information could be exposed there. See [This issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2753).

@ -205,7 +211,7 @@ Released at 05-05-2022

Released at 12-04-2022

**Update notes:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics, so `vmselect` and `vmstorage` nodes will experience communication errors and read requests to `vmselect` will fail until the upgrade is complete. These errors will stop after all the `vmselect` and `vmstorage` nodes are updated to the new release. It is safe to downgrade to previous releases.
**Update notes:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics, so read requests to `vmselect` will fail until the upgrade is complete. These errors will stop after all the `vmselect` and `vmstorage` nodes are updated to the new release. It is safe to downgrade to previous releases.

* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add support for `alert_relabel_configs` option at `-notifier.config`. This option allows configuring relabeling rules for alerts before sending them to configured notifiers. See [these docs](https://docs.victoriametrics.com/vmalert.html#notifier-configuration-file) for details.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmalert.html): allow passing StatefulSet pod names to `-promscrape.cluster.memberNum` command-line flag. In this case the member number is automatically extracted from the pod name, which must end with the number in the range `0 ... promscrape.cluster.membersCount-1`. For example, `vmagent-0`, `vmagent-1`, etc. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2359) and [these docs](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).

@ -221,7 +227,7 @@ Released at 12-04-2022

Released at 07-04-2022

**Update notes:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics, so `vmselect` and `vmstorage` nodes will experience communication errors and read requests to `vmselect` will fail until the upgrade is complete. These errors will stop after all the `vmselect` and `vmstorage` nodes are updated to the new release. It is safe to downgrade to previous releases.
**Update notes:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics, so read requests to `vmselect` will fail until the upgrade is complete. These errors will stop after all the `vmselect` and `vmstorage` nodes are updated to the new release. It is safe to downgrade to previous releases.

* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to verify files obtained via [native export](https://docs.victoriametrics.com/#how-to-export-data-in-native-format). See [these docs](https://docs.victoriametrics.com/vmctl.html#verifying-exported-blocks-from-victoriametrics) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2362).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add pre-defined dashboards for per-job CPU usage, memory usage and disk IO usage. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2243) for details.
@ -406,7 +406,7 @@ The replication can be enabled by passing `-replicationFactor=N` command-line fl

The cluster must contain at least `2*N-1` `vmstorage` nodes, where `N` is replication factor, in order to maintain the given replication factor for newly ingested data when `N-1` of storage nodes are unavailable.

When the replication is enabled, `-dedup.minScrapeInterval=1ms` command-line flag must be passed to `vmselect` nodes, so they could de-duplicate replicated samples obtained from distinct `vmstorage` nodes during querying. If duplicate data is pushed to VictoriaMetrics from identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) instances or Prometheus instances, then the `-dedup.minScrapeInterval` must be set to bigger values according to [deduplication docs](#deduplication).
VictoriaMetrics stores timestamps with millisecond precision, so `-dedup.minScrapeInterval=1ms` command-line flag must be passed to `vmselect` nodes when the replication is enabled, so they could de-duplicate replicated samples obtained from distinct `vmstorage` nodes during querying. If duplicate data is pushed to VictoriaMetrics from identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) instances or Prometheus instances, then the `-dedup.minScrapeInterval` must be set to `scrape_interval` from scrape configs according to [deduplication docs](#deduplication).

Note that [replication doesn't save from disaster](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883), so it is recommended performing regular backups. See [these docs](#backups) for details.
@ -30,6 +30,8 @@ Just download VictoriaMetrics and follow
Then read [Prometheus setup](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-setup)
and [Grafana setup](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#grafana-setup) docs.

VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](https://docs.victoriametrics.com/#how-to-upgrade-victoriametrics).

### Starting VM-Single via Docker

@ -169,4 +171,4 @@ and [backups](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.htm

To avoid excessive resource usage or performance degradation, limits must be in place:
* [Resource usage limits](https://docs.victoriametrics.com/FAQ.html#how-to-set-a-memory-limit-for-victoriametrics-components);
* [Cardinality limiter](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cardinality-limiter).
* [Cardinality limiter](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cardinality-limiter).
|
|
|
@ -15,18 +15,21 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t
|
|||
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
[Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
|
||||
and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
Just download VictoriaMetrics and follow [these instructions](https://docs.victoriametrics.com/Quick-Start.html).
|
||||
Just download [the latest version of VictoriaMetrics](https://docs.victoriametrics.com/CHANGELOG.html)
|
||||
and follow [these instructions](https://docs.victoriametrics.com/Quick-Start.html).
|
||||
|
||||
The cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||
|
||||
Learn more about [key concepts](https://docs.victoriametrics.com/keyConcepts.html) of VictoriaMetrics and follow the
|
||||
[QuickStart guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.
|
||||
[quick start guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.
|
||||
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
|
||||
See [features available in enterprise package](https://victoriametrics.com/products/enterprise/).
|
||||
Enterprise binaries can be downloaded and evaluated for free
|
||||
from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||
|
||||
VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).
|
||||
|
||||
## Prominent features
|
||||
|
||||
VictoriaMetrics has the following prominent features:
|
||||
|
@ -110,12 +113,21 @@ The following command-line flags are used the most:
|
|||
|
||||
Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).
|
||||
|
||||
See how to [ingest data to VictoriaMetrics](#how-to-import-time-series-data), how to [query VictoriaMetrics via Grafana](#grafana-setup), how to [query VictoriaMetrics via Graphite API](#graphite-api-usage) and how to [handle alerts](#alerting).
|
||||
The following docs may be useful during initial VictoriaMetrics setup:
|
||||
* [How to set up scraping of Prometheus-compatible targets](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
|
||||
* [How to ingest data to VictoriaMetrics](#how-to-import-time-series-data)
|
||||
* [How to set up Prometheus to write data to VictoriaMetrics](https://docs.victoriametrics.com/#prometheus-setup)
|
||||
* [How to query VictoriaMetrics via Grafana](#grafana-setup)
|
||||
* [How to query VictoriaMetrics via Graphite API](#graphite-api-usage)
|
||||
* [How to handle alerts](#alerting)
|
||||
|
||||
VictoriaMetrics accepts [Prometheus querying API requests](#prometheus-querying-api-usage) on port `8428` by default.
|
||||
|
||||
It is recommended setting up [monitoring](#monitoring) for VictoriaMetrics.
|
||||
|
||||
VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).
|
||||
|
||||
|
||||
### Environment variables
|
||||
|
||||
Each flag value can be set via environment variables according to these rules:
|
||||
|
@ -228,6 +240,8 @@ Then build graphs and dashboards for the created datasource using [PromQL](https
|
|||
|
||||
## How to upgrade VictoriaMetrics
|
||||
|
||||
VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking [the CHANGELOG page](https://docs.victoriametrics.com/CHANGELOG.html) and performing regular upgrades.
|
||||
|
||||
It is safe upgrading VictoriaMetrics to new versions unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is safe skipping multiple versions during the upgrade unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is recommended performing regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features.
|
||||
|
||||
It is also safe downgrading to older versions unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise.
|
||||
|
@ -636,6 +650,7 @@ VictoriaMetrics accepts `round_digits` query arg for `/api/v1/query` and `/api/v
|
|||
VictoriaMetrics accepts `limit` query arg for `/api/v1/labels` and `/api/v1/label/<labelName>/values` handlers for limiting the number of returned entries. For example, the query to `/api/v1/labels?limit=5` returns a sample of up to 5 unique labels, while ignoring the rest of labels. If the provided `limit` value exceeds the corresponding `-search.maxTagKeys` / `-search.maxTagValues` command-line flag values, then limits specified in the command-line flags are used.
|
||||
|
||||
By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
|
||||
VictoriaMetrics accepts `limit` query arg for `/api/v1/series` handlers for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used.
|
||||
|
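For illustration, a small Go client issuing such a request; the address, the `match[]` selector and the limit are arbitrary examples:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"
)

func main() {
	// Query /api/v1/series for up to 5 series matching the selector over the last hour.
	params := url.Values{}
	params.Set("match[]", `{job="node_exporter"}`)
	params.Set("start", fmt.Sprint(time.Now().Add(-time.Hour).Unix()))
	params.Set("end", fmt.Sprint(time.Now().Unix()))
	params.Set("limit", "5")

	resp, err := http.Get("http://localhost:8428/api/v1/series?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON list of label sets, at most 5 entries
}
```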

Additionally, VictoriaMetrics provides the following handlers:
||||
|
@ -730,14 +745,14 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
|||
### Development ARM build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
|
||||
2. Run `make victoria-metrics-arm` or `make victoria-metrics-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `victoria-metrics-arm` or `victoria-metrics-arm64` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
### Production ARM build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make victoria-metrics-arm-prod` or `make victoria-metrics-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `victoria-metrics-arm-prod` or `victoria-metrics-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make victoria-metrics-linux-arm-prod` or `make victoria-metrics-linux-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `victoria-metrics-linux-arm-prod` or `victoria-metrics-linux-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
### Pure Go build (CGO_ENABLED=0)
|
||||
|
||||
@ -1255,7 +1270,7 @@ with the enabled de-duplication. See [this section](#deduplication) for details.

VictoriaMetrics leaves a single raw sample with the biggest timestamp per each `-dedup.minScrapeInterval` discrete interval if `-dedup.minScrapeInterval` is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would leave a single raw sample with the biggest timestamp per each discrete 60s interval. If multiple raw samples have the same biggest timestamp on the given `-dedup.minScrapeInterval` discrete interval, then an arbitrary sample out of these samples is left. This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).

The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. It is safe to use deduplication and downsampling simultaneously.
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. So it is safe to use deduplication and downsampling simultaneously.

The recommended value for `-dedup.minScrapeInterval` must be equal to the `scrape_interval` config from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
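The rule above is simple enough to sketch. A simplified illustration of the described behaviour — keep the sample with the biggest timestamp inside each discrete interval — which is not the actual storage-level implementation:

```go
package main

import "fmt"

// deduplicate keeps, for each discrete interval of length intervalMs, only the
// sample with the biggest timestamp. Input timestamps must be sorted ascending.
func deduplicate(timestamps []int64, values []float64, intervalMs int64) ([]int64, []float64) {
	var outTs []int64
	var outVs []float64
	for i := range timestamps {
		// A sample is kept if it is the last one in its interval, i.e. the next
		// sample falls into a later interval (or there is no next sample).
		if i == len(timestamps)-1 || timestamps[i+1]/intervalMs > timestamps[i]/intervalMs {
			outTs = append(outTs, timestamps[i])
			outVs = append(outVs, values[i])
		}
	}
	return outTs, outVs
}

func main() {
	ts := []int64{1000, 30_000, 59_000, 61_000, 119_000}
	vs := []float64{1, 2, 3, 4, 5}
	dedupTs, dedupVs := deduplicate(ts, vs, 60_000) // -dedup.minScrapeInterval=60s
	fmt.Println(dedupTs, dedupVs)                   // [59000 119000] [3 5]
}
```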
@ -1263,6 +1278,8 @@ The de-duplication reduces disk space usage if multiple identically configured [
write data to the same VictoriaMetrics instance. These vmagent or Prometheus instances must have identical
`external_labels` section in their configs, so they write data to the same time series. See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).

It is recommended passing different `-promscrape.cluster.name` values to HA pairs of `vmagent` instances, so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples from other `vmagent` instances. See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.

## Storage

VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like
|
|
|
@ -19,18 +19,21 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t
|
|||
VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
[Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
|
||||
and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
Just download VictoriaMetrics and follow [these instructions](https://docs.victoriametrics.com/Quick-Start.html).
|
||||
Just download [the latest version of VictoriaMetrics](https://docs.victoriametrics.com/CHANGELOG.html)
|
||||
and follow [these instructions](https://docs.victoriametrics.com/Quick-Start.html).
|
||||
|
||||
The cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
|
||||
|
||||
Learn more about [key concepts](https://docs.victoriametrics.com/keyConcepts.html) of VictoriaMetrics and follow the
|
||||
[QuickStart guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.
|
||||
[quick start guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.
|
||||
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
|
||||
See [features available in enterprise package](https://victoriametrics.com/products/enterprise/).
|
||||
Enterprise binaries can be downloaded and evaluated for free
|
||||
from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
|
||||
|
||||
VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).
|
||||
|
||||
## Prominent features
|
||||
|
||||
VictoriaMetrics has the following prominent features:
|
||||
|
@ -114,12 +117,21 @@ The following command-line flags are used the most:
|
|||
|
||||
Other flags have good enough default values, so set them only if you really need this. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).
|
||||
|
||||
See how to [ingest data to VictoriaMetrics](#how-to-import-time-series-data), how to [query VictoriaMetrics via Grafana](#grafana-setup), how to [query VictoriaMetrics via Graphite API](#graphite-api-usage) and how to [handle alerts](#alerting).
|
||||
The following docs may be useful during initial VictoriaMetrics setup:
|
||||
* [How to set up scraping of Prometheus-compatible targets](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
|
||||
* [How to ingest data to VictoriaMetrics](#how-to-import-time-series-data)
|
||||
* [How to set up Prometheus to write data to VictoriaMetrics](https://docs.victoriametrics.com/#prometheus-setup)
|
||||
* [How to query VictoriaMetrics via Grafana](#grafana-setup)
|
||||
* [How to query VictoriaMetrics via Graphite API](#graphite-api-usage)
|
||||
* [How to handle alerts](#alerting)
|
||||
|
||||
VictoriaMetrics accepts [Prometheus querying API requests](#prometheus-querying-api-usage) on port `8428` by default.
|
||||
|
||||
It is recommended setting up [monitoring](#monitoring) for VictoriaMetrics.
|
||||
|
||||
VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) and performing [regular upgrades](#how-to-upgrade-victoriametrics).
|
||||
|
||||
|
||||
### Environment variables
|
||||
|
||||
Each flag value can be set via environment variables according to these rules:
|
||||
|
@ -232,6 +244,8 @@ Then build graphs and dashboards for the created datasource using [PromQL](https
|
|||
|
||||
## How to upgrade VictoriaMetrics
|
||||
|
||||
VictoriaMetrics is developed at a fast pace, so it is recommended periodically checking [the CHANGELOG page](https://docs.victoriametrics.com/CHANGELOG.html) and performing regular upgrades.
|
||||
|
||||
It is safe upgrading VictoriaMetrics to new versions unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is safe skipping multiple versions during the upgrade unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise. It is recommended performing regular upgrades to the latest version, since it may contain important bug fixes, performance optimizations or new features.
|
||||
|
||||
It is also safe downgrading to older versions unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) say otherwise.
|
||||
|
@ -640,6 +654,7 @@ VictoriaMetrics accepts `round_digits` query arg for `/api/v1/query` and `/api/v
|
|||
VictoriaMetrics accepts `limit` query arg for `/api/v1/labels` and `/api/v1/label/<labelName>/values` handlers for limiting the number of returned entries. For example, the query to `/api/v1/labels?limit=5` returns a sample of up to 5 unique labels, while ignoring the rest of labels. If the provided `limit` value exceeds the corresponding `-search.maxTagKeys` / `-search.maxTagValues` command-line flag values, then limits specified in the command-line flags are used.
|
||||
|
||||
By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
|
||||
VictoriaMetrics accepts `limit` query arg for `/api/v1/series` handlers for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used.
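For illustration, the following minimal Go sketch queries `/api/v1/series` with the `limit`, `start` and `match[]` query args; it assumes a single-node instance listening on the default `localhost:8428` and an arbitrary series selector:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("match[]", `{__name__!=""}`)     // series selector, required by the API
	params.Set("limit", "5")                    // return at most 5 series
	params.Set("start", "2022-06-21T00:00:00Z") // override the default last-5-minutes lookback
	resp, err := http.Get("http://localhost:8428/api/v1/series?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```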
|
||||
|
||||
Additionally, VictoriaMetrics provides the following handlers:
|
||||
|
||||
|
@ -734,14 +749,14 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
|||
### Development ARM build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
|
||||
2. Run `make victoria-metrics-arm` or `make victoria-metrics-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `victoria-metrics-arm` or `victoria-metrics-arm64` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
### Production ARM build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make victoria-metrics-arm-prod` or `make victoria-metrics-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `victoria-metrics-arm-prod` or `victoria-metrics-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make victoria-metrics-linux-arm-prod` or `make victoria-metrics-linux-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `victoria-metrics-linux-arm-prod` or `victoria-metrics-linux-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
### Pure Go build (CGO_ENABLED=0)
|
||||
|
||||
|
@ -1259,7 +1274,7 @@ with the enabled de-duplication. See [this section](#deduplication) for details.
|
|||
|
||||
VictoriaMetrics leaves a single raw sample with the biggest timestamp per each `-dedup.minScrapeInterval` discrete interval if `-dedup.minScrapeInterval` is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would leave a single raw sample with the biggest timestamp per each discrete 60s interval. If multiple raw samples have the same biggest timestamp on the given `-dedup.minScrapeInterval` discrete interval, then an arbitrary sample out of these samples is left. This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).
|
||||
|
||||
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. It is safe to use deduplication and downsampling simultaneously.
|
||||
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. So it is safe to use deduplication and downsampling simultaneously.
|
||||
|
||||
The recommended value for `-dedup.minScrapeInterval` must equal the `scrape_interval` value from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
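The deduplication rule above can be illustrated with a short standalone sketch; the `deduplicate` helper below is not VictoriaMetrics code, it only mimics the keep-the-biggest-timestamp-per-interval behaviour:

```go
package main

import "fmt"

type sample struct {
	ts    int64 // unix timestamp in milliseconds
	value float64
}

// deduplicate keeps the sample with the biggest timestamp per discrete interval window.
// samples must be sorted by timestamp; interval is in milliseconds.
func deduplicate(samples []sample, interval int64) []sample {
	var out []sample
	for _, s := range samples {
		window := s.ts - s.ts%interval
		if len(out) > 0 && out[len(out)-1].ts-out[len(out)-1].ts%interval == window {
			out[len(out)-1] = s // a later sample in the same window wins
			continue
		}
		out = append(out, s)
	}
	return out
}

func main() {
	samples := []sample{{1000, 1}, {30000, 2}, {59000, 3}, {61000, 4}}
	// with -dedup.minScrapeInterval=60s only {59000, 3} and {61000, 4} survive
	fmt.Println(deduplicate(samples, 60000))
}
```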
|
||||
|
||||
|
@ -1267,6 +1282,8 @@ The de-duplication reduces disk space usage if multiple identically configured [
|
|||
write data to the same VictoriaMetrics instance. These vmagent or Prometheus instances must have identical
|
||||
`external_labels` section in their configs, so they write data to the same time series. See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
|
||||
|
||||
It is recommended passing different `-promscrape.cluster.name` values to HA pairs of `vmagent` instances, so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples from other `vmagent` instances. See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.
|
||||
|
||||
## Storage
|
||||
|
||||
VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like
|
||||
|
|
|
@ -28,12 +28,12 @@ The expected output should return [HTTP Status 204](https://datatracker.ietf.org
|
|||
> Host: 127.0.0.1:8428
|
||||
> User-Agent: curl/7.81.0
|
||||
> Accept: */*
|
||||
>
|
||||
>
|
||||
* Mark bundle as not supporting multiuse
|
||||
< HTTP/1.1 204 No Content
|
||||
< X-Server-Hostname: eba075fb0e1a
|
||||
< Date: Tue, 21 Jun 2022 07:33:35 GMT
|
||||
<
|
||||
<
|
||||
* Connection #0 to host 127.0.0.1 left intact
|
||||
```
|
||||
|
||||
|
@ -59,12 +59,12 @@ The expected output should return [HTTP Status 204](https://datatracker.ietf.org
|
|||
> Host: 127.0.0.1:8481
|
||||
> User-Agent: curl/7.81.0
|
||||
> Accept: */*
|
||||
>
|
||||
>
|
||||
* Mark bundle as not supporting multiuse
|
||||
< HTTP/1.1 204 No Content
|
||||
< X-Server-Hostname: 101ed7a45c94
|
||||
< Date: Tue, 21 Jun 2022 07:21:36 GMT
|
||||
<
|
||||
<
|
||||
* Connection #0 to host 127.0.0.1 left intact
|
||||
```
|
||||
|
||||
|
@ -401,6 +401,7 @@ Additional information:
|
|||
* [Prometheus querying API usage](https://docs.victoriametrics.com/#prometheus-querying-api-usage)
|
||||
* [Finding series by label matchers](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers)
|
||||
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
|
||||
VictoriaMetrics accepts `limit` query arg for `/api/v1/series` handlers for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used.
|
||||
|
||||
## /api/v1/status/tsdb
|
||||
|
||||
|
|
|
@ -513,7 +513,15 @@ If each target is scraped by multiple `vmagent` instances, then data deduplicati
|
|||
The `-dedup.minScrapeInterval` must be set to the `scrape_interval` configured at `-promscrape.config`.
|
||||
See [these docs](https://docs.victoriametrics.com/#deduplication) for details.
|
||||
|
||||
If multiple `vmagent` clusters scrape the same set of targets, then each cluster must have a unique value for the `-promscrape.cluster.name` command-line flag.
|
||||
## High availability
|
||||
|
||||
It is possible to run multiple identically configured `vmagent` instances or `vmagent` [clusters](#scraping-big-number-of-targets),
|
||||
so they [scrape](#how-to-collect-metrics-in-prometheus-format) the same set of targets and push the collected data to the same set of VictoriaMetrics remote storage systems.
|
||||
|
||||
In this case the deduplication must be configured at VictoriaMetrics in order to de-duplicate samples received from multiple identically configured `vmagent` instances or clusters.
|
||||
See [these docs](https://docs.victoriametrics.com/#deduplication) for details.
|
||||
|
||||
It is also recommended passing different values to `-promscrape.cluster.name` command-line flag per each `vmagent` instance or per each `vmagent` cluster in HA setup.
|
||||
This is needed for proper data de-duplication. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679) for details.
|
||||
|
||||
## Scraping targets via a proxy
|
||||
|
@ -813,14 +821,14 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
|||
### Development ARM build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
|
||||
2. Run `make vmagent-arm` or `make vmagent-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
It builds `vmagent-arm` or `vmagent-arm64` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make vmagent-linux-arm` or `make vmagent-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
It builds `vmagent-linux-arm` or `vmagent-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
### Production ARM build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make vmagent-arm-prod` or `make vmagent-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmagent-arm-prod` or `vmagent-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make vmagent-linux-arm-prod` or `make vmagent-linux-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmagent-linux-arm-prod` or `vmagent-linux-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
## Profiling
|
||||
|
||||
|
|
|
@ -1112,11 +1112,11 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
|||
### Development ARM build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
|
||||
2. Run `make vmalert-arm` or `make vmalert-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmalert-arm` or `vmalert-arm64` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make vmalert-linux-arm` or `make vmalert-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmalert-linux-arm` or `vmalert-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
### Production ARM build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make vmalert-arm-prod` or `make vmalert-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmalert-arm-prod` or `vmalert-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make vmalert-linux-arm-prod` or `make vmalert-linux-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmalert-linux-arm-prod` or `vmalert-linux-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
|
|
|
@ -20,7 +20,7 @@ To see the full list of supported modes
|
|||
run the following command:
|
||||
|
||||
```console
|
||||
$ ./vmctl --help
|
||||
$ ./vmctl --help
|
||||
NAME:
|
||||
vmctl - VictoriaMetrics command-line tool
|
||||
|
||||
|
@ -44,9 +44,9 @@ OPTIONS:
|
|||
--influx-addr value InfluxDB server addr (default: "http://localhost:8086")
|
||||
--influx-user value InfluxDB user [$INFLUX_USERNAME]
|
||||
...
|
||||
--vm-addr vmctl VictoriaMetrics address to perform import requests.
|
||||
Should be the same as --httpListenAddr value for single-node version or vminsert component.
|
||||
When importing into the clustered version do not forget to set additionally --vm-account-id flag.
|
||||
--vm-addr vmctl VictoriaMetrics address to perform import requests.
|
||||
Should be the same as --httpListenAddr value for single-node version or vminsert component.
|
||||
When importing into the clustered version do not forget to set additionally --vm-account-id flag.
|
||||
Please note, that vmctl performs initial readiness check for the given address by checking `/health` endpoint. (default: "http://localhost:8428")
|
||||
--vm-user value VictoriaMetrics username for basic auth [$VM_USERNAME]
|
||||
--vm-password value VictoriaMetrics password for basic auth [$VM_PASSWORD]
|
||||
|
@ -111,7 +111,7 @@ $ ./vmctl opentsdb --otsdb-addr http://opentsdb:4242/ --otsdb-retentions sum-1m-
|
|||
OpenTSDB import mode
|
||||
2021/04/09 11:52:50 Will collect data starting at TS 1617990770
|
||||
2021/04/09 11:52:50 Loading all metrics from OpenTSDB for filters: [system]
|
||||
Found 9 metrics to import. Continue? [Y/n]
|
||||
Found 9 metrics to import. Continue? [Y/n]
|
||||
2021/04/09 11:52:51 Starting work on system.load1
|
||||
23 / 402200 [>____________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________] 0.01% 2 p/s
|
||||
```
|
||||
|
@ -284,7 +284,7 @@ InfluxDB import mode
|
|||
2020/01/26 14:23:29 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
|
||||
2020/01/26 14:23:29 found 12 fields
|
||||
2020/01/26 14:23:29 fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"
|
||||
Found 10 timeseries to import. Continue? [Y/n]
|
||||
Found 10 timeseries to import. Continue? [Y/n]
|
||||
```
|
||||
|
||||
The timeseries select query would be following:
|
||||
|
@ -503,10 +503,10 @@ processed and can't show the progress bar. It will show the current processing s
|
|||
--vm-native-filter-match='{job="vmagent"}' \
|
||||
--vm-native-filter-time-start='2020-01-01T20:07:00Z'
|
||||
VictoriaMetrics Native import mode
|
||||
Initing export pipe from "http://localhost:8528" with filters:
|
||||
Initing export pipe from "http://localhost:8528" with filters:
|
||||
filter: match[]={job="vmagent"}
|
||||
Initing import process to "http://localhost:8428":
|
||||
Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
|
||||
Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
|
||||
2020/10/13 17:04:59 Total time: 952.143376ms
|
||||
```
|
||||
|
||||
|
@ -528,7 +528,7 @@ and specify `accountID` param.
|
|||
|
||||
## Verifying exported blocks from VictoriaMetrics
|
||||
|
||||
In this mode, `vmctl` allows verifying correctness and integrity of data exported via [native format](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-export-data-in-native-format) from VictoriaMetrics.
|
||||
In this mode, `vmctl` allows verifying correctness and integrity of data exported via [native format](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-export-data-in-native-format) from VictoriaMetrics.
|
||||
You can verify exported data at disk before uploading it by `vmctl verify-block` command:
|
||||
|
||||
```console
|
||||
|
@ -665,11 +665,11 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b
|
|||
#### Development ARM build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.17.
|
||||
2. Run `make vmctl-arm` or `make vmctl-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmctl-arm` or `vmctl-arm64` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make vmctl-linux-arm` or `make vmctl-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmctl-linux-arm` or `vmctl-linux-arm64` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
#### Production ARM build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make vmctl-arm-prod` or `make vmctl-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmctl-arm-prod` or `vmctl-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
2. Run `make vmctl-linux-arm-prod` or `make vmctl-linux-arm64-prod` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
It builds `vmctl-linux-arm-prod` or `vmctl-linux-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
|
|
12
go.mod
12
go.mod
|
@ -11,7 +11,7 @@ require (
|
|||
github.com/VictoriaMetrics/fasthttp v1.1.0
|
||||
github.com/VictoriaMetrics/metrics v1.18.1
|
||||
github.com/VictoriaMetrics/metricsql v0.44.1
|
||||
github.com/aws/aws-sdk-go v1.44.51
|
||||
github.com/aws/aws-sdk-go v1.44.53
|
||||
github.com/cespare/xxhash/v2 v2.1.2
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
|
||||
|
@ -23,7 +23,7 @@ require (
|
|||
github.com/go-kit/kit v0.12.0
|
||||
github.com/golang/snappy v0.0.4
|
||||
github.com/influxdata/influxdb v1.9.8
|
||||
github.com/klauspost/compress v1.15.7
|
||||
github.com/klauspost/compress v1.15.8
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/oklog/ulid v1.3.1
|
||||
|
@ -37,8 +37,8 @@ require (
|
|||
github.com/valyala/quicktemplate v1.7.0
|
||||
golang.org/x/net v0.0.0-20220708220712-1185a9018129
|
||||
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0
|
||||
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d
|
||||
google.golang.org/api v0.86.0
|
||||
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e
|
||||
google.golang.org/api v0.87.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
|
@ -76,7 +76,7 @@ require (
|
|||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220711132622-b6f31b0ceb50 // indirect
|
||||
google.golang.org/grpc v1.47.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d // indirect
|
||||
google.golang.org/grpc v1.48.0 // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
)
|
||||
|
|
22
go.sum
22
go.sum
|
@ -146,8 +146,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
|
|||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.44.51 h1:jO9hoLynZOrMM4dj0KjeKIK+c6PA+HQbKoHOkAEye2Y=
|
||||
github.com/aws/aws-sdk-go v1.44.51/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go v1.44.53 h1:2MErE8gRyBLuE1fuH2Sqlj1xoN3S6/jXb0aO/A1jGfk=
|
||||
github.com/aws/aws-sdk-go v1.44.53/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
|
@ -573,8 +573,8 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
|
|||
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok=
|
||||
github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA=
|
||||
github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
|
@ -1137,8 +1137,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d h1:/m5NbqQelATgoSPVC2Z23sR4kVNokFwDDyWh/3rGY+I=
|
||||
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e h1:NHvCuwuS43lGnYhten69ZWqi2QOj/CiDNcKbVqwVoew=
|
||||
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1285,8 +1285,9 @@ google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6r
|
|||
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
|
||||
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
|
||||
google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
|
||||
google.golang.org/api v0.86.0 h1:ZAnyOHQFIuWso1BodVfSaRyffD74T9ERGFa3k1fNk/U=
|
||||
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
|
||||
google.golang.org/api v0.87.0 h1:pUQVF/F+X7Tl1lo4LJoJf5BOpjtmINU80p9XpYTU2p4=
|
||||
google.golang.org/api v0.87.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1380,8 +1381,8 @@ google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljW
|
|||
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220711132622-b6f31b0ceb50 h1:gyHXMCq6jOxTS4ywai4Ht8kjJcDRxkmtCJERlZ3nGms=
|
||||
google.golang.org/genproto v0.0.0-20220711132622-b6f31b0ceb50/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d h1:YbuF5+kdiC516xIP60RvlHeFbY9sRDR73QsAGHpkeVw=
|
||||
google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@ -1417,8 +1418,9 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5
|
|||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
|
||||
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
|
||||
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
|
|
|
@ -51,13 +51,12 @@ func NewArrayInt(name string, description string) *ArrayInt {
|
|||
//
|
||||
// The following example sets equivalent flag array with two items (value1, value2):
|
||||
//
|
||||
// -foo=value1 -foo=value2
|
||||
// -foo=value1,value2
|
||||
// -foo=value1 -foo=value2
|
||||
// -foo=value1,value2
|
||||
//
|
||||
// Flag values may be quoted. For instance, the following arg creates an array of ("a", "b, c") items:
|
||||
//
|
||||
// -foo='a,"b, c"'
|
||||
//
|
||||
// -foo='a,"b, c"'
|
||||
type Array []string
|
||||
|
||||
// String implements flag.Value interface
|
||||
|
|
|
@ -9,11 +9,11 @@ import (
|
|||
|
||||
// pools contains pools for byte slices of various capacities.
|
||||
//
|
||||
// pools[0] is for capacities from 0 to 8
|
||||
// pools[1] is for capacities from 9 to 16
|
||||
// pools[2] is for capacities from 17 to 32
|
||||
// ...
|
||||
// pools[n] is for capacities from 2^(n+2)+1 to 2^(n+3)
|
||||
// pools[0] is for capacities from 0 to 8
|
||||
// pools[1] is for capacities from 9 to 16
|
||||
// pools[2] is for capacities from 17 to 32
|
||||
// ...
|
||||
// pools[n] is for capacities from 2^(n+2)+1 to 2^(n+3)
|
||||
//
|
||||
// Limit the maximum capacity to 2^18, since there are no performance benefits
|
||||
// in caching byte slices with bigger capacities.
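The mapping from slice capacity to pool index can be sketched as follows; this is a hypothetical helper for illustration, not the actual implementation:

```go
package main

import (
	"fmt"
	"math/bits"
)

// poolIndex maps a capacity onto the layout described above:
// pools[0] covers 0..8, pools[1] covers 9..16, pools[n] covers 2^(n+2)+1 .. 2^(n+3).
func poolIndex(capacity int) int {
	if capacity <= 8 {
		return 0
	}
	// bits.Len(capacity-1) is the number of bits needed to represent capacity-1,
	// so all capacities in (2^(k-1), 2^k] share the same index.
	return bits.Len(uint(capacity-1)) - 3
}

func main() {
	for _, c := range []int{1, 8, 9, 16, 17, 32, 33, 1 << 18} {
		fmt.Printf("capacity %7d -> pools[%d]\n", c, poolIndex(c))
	}
}
```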
|
||||
|
|
|
@ -114,12 +114,13 @@ again:
|
|||
default:
|
||||
}
|
||||
|
||||
bsr := heap.Pop(&bsm.bsrHeap).(*blockStreamReader)
|
||||
bsr := bsm.bsrHeap[0]
|
||||
|
||||
var nextItem []byte
|
||||
hasNextItem := false
|
||||
if len(bsm.bsrHeap) > 0 {
|
||||
nextItem = bsm.bsrHeap[0].bh.firstItem
|
||||
if len(bsm.bsrHeap) > 1 {
|
||||
bsr := bsm.bsrHeap.getNextReader()
|
||||
nextItem = bsr.bh.firstItem
|
||||
hasNextItem = true
|
||||
}
|
||||
items := bsr.Block.items
|
||||
|
@ -139,19 +140,20 @@ again:
|
|||
if bsr.blockItemIdx == len(bsr.Block.items) {
|
||||
// bsr.Block is fully read. Proceed to the next block.
|
||||
if bsr.Next() {
|
||||
heap.Push(&bsm.bsrHeap, bsr)
|
||||
heap.Fix(&bsm.bsrHeap, 0)
|
||||
goto again
|
||||
}
|
||||
if err := bsr.Error(); err != nil {
|
||||
return fmt.Errorf("cannot read storageBlock: %w", err)
|
||||
}
|
||||
heap.Pop(&bsm.bsrHeap)
|
||||
goto again
|
||||
}
|
||||
|
||||
// The next item in the bsr.Block exceeds nextItem.
|
||||
// Adjust bsr.bh.firstItem and return bsr to heap.
|
||||
bsr.bh.firstItem = append(bsr.bh.firstItem[:0], bsr.Block.items[bsr.blockItemIdx].String(bsr.Block.data)...)
|
||||
heap.Push(&bsm.bsrHeap, bsr)
|
||||
heap.Fix(&bsm.bsrHeap, 0)
|
||||
goto again
|
||||
}
|
||||
|
||||
|
@ -201,6 +203,21 @@ func (bsm *blockStreamMerger) flushIB(bsw *blockStreamWriter, ph *partHeader, it
|
|||
|
||||
type bsrHeap []*blockStreamReader
|
||||
|
||||
func (bh bsrHeap) getNextReader() *blockStreamReader {
|
||||
if len(bh) < 2 {
|
||||
return nil
|
||||
}
|
||||
if len(bh) < 3 {
|
||||
return bh[1]
|
||||
}
|
||||
a := bh[1]
|
||||
b := bh[2]
|
||||
if string(a.bh.firstItem) <= string(b.bh.firstItem) {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (bh *bsrHeap) Len() int {
|
||||
return len(*bh)
|
||||
}
|
||||
|
|
|
@ -1,3 +1,2 @@
|
|||
// Package metricsql has been moved to https://github.com/VictoriaMetrics/metricsql .
|
||||
//
|
||||
package metricsql
|
||||
|
|
|
@ -34,19 +34,19 @@ const maxColumnsPerRow = 64 * 1024
|
|||
//
|
||||
// s must have comma-separated list of the following entries:
|
||||
//
|
||||
// <column_pos>:<column_type>:<extension>
|
||||
// <column_pos>:<column_type>:<extension>
|
||||
//
|
||||
// Where:
|
||||
//
|
||||
// - <column_pos> is numeric csv column position. The first column has position 1.
|
||||
// - <column_type> is one of the following types:
|
||||
// - time - the corresponding column contains timestamp. Timestamp format is determined by <extension>. The following formats are supported:
|
||||
// - unix_s - unix timestamp in seconds
|
||||
// - unix_ms - unix timestamp in milliseconds
|
||||
// - unix_ns - unix_timestamp in nanoseconds
|
||||
// - rfc3339 - RFC3339 format in the form `2006-01-02T15:04:05Z07:00`
|
||||
// - label - the corresponding column contains metric label with the name set in <extension>.
|
||||
// - metric - the corresponding column contains metric value with the name set in <extension>.
|
||||
// - time - the corresponding column contains timestamp. Timestamp format is determined by <extension>. The following formats are supported:
|
||||
// - - unix_s - unix timestamp in seconds
|
||||
// - - unix_ms - unix timestamp in milliseconds
|
||||
// - - unix_ns - unix_timestamp in nanoseconds
|
||||
// - - rfc3339 - RFC3339 format in the form `2006-01-02T15:04:05Z07:00`
|
||||
// - label - the corresponding column contains metric label with the name set in <extension>.
|
||||
// - metric - the corresponding column contains metric value with the name set in <extension>.
|
||||
//
|
||||
// s must contain at least a single 'metric' column and no more than a single `time` column.
|
||||
func ParseColumnDescriptors(s string) ([]ColumnDescriptor, error) {
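A hypothetical descriptor string matching the format above could look like this; the column layout, label name and metric name are made up for illustration:

```go
package main

import "fmt"

func main() {
	// Column 1 holds an RFC3339 timestamp, column 2 a "host" label,
	// column 3 a metric named "temperature".
	format := "1:time:rfc3339,2:label:host,3:metric:temperature"
	csvRow := "2022-06-21T07:33:35Z,server-01,23.5"
	fmt.Printf("descriptors %q describe the row %q\n", format, csvRow)
}
```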
|
||||
|
|
|
@ -58,7 +58,7 @@ func (pts *partitionSearch) reset() {
|
|||
// tsids must be sorted.
|
||||
// tsids cannot be modified after the Init call, since it is owned by pts.
|
||||
//
|
||||
/// MustClose must be called when partition search is done.
|
||||
// MustClose must be called when partition search is done.
|
||||
func (pts *partitionSearch) Init(pt *partition, tsids []TSID, tr TimeRange) {
|
||||
if pts.needClosing {
|
||||
logger.Panicf("BUG: missing partitionSearch.MustClose call before the next call to Init")
|
||||
|
|
|
@ -511,6 +511,8 @@ type Metrics struct {
|
|||
PrefetchedMetricIDsSize uint64
|
||||
PrefetchedMetricIDsSizeBytes uint64
|
||||
|
||||
NextRetentionSeconds uint64
|
||||
|
||||
IndexDBMetrics IndexDBMetrics
|
||||
TableMetrics TableMetrics
|
||||
}
|
||||
|
@ -601,6 +603,8 @@ func (s *Storage) UpdateMetrics(m *Metrics) {
|
|||
m.PrefetchedMetricIDsSize += uint64(prefetchedMetricIDs.Len())
|
||||
m.PrefetchedMetricIDsSizeBytes += uint64(prefetchedMetricIDs.SizeBytes())
|
||||
|
||||
m.NextRetentionSeconds = uint64(nextRetentionDuration(s.retentionMsecs).Seconds())
|
||||
|
||||
s.idb().UpdateMetrics(&m.IndexDBMetrics)
|
||||
s.tb.UpdateMetrics(&m.TableMetrics)
|
||||
}
|
||||
|
|
|
@ -567,16 +567,17 @@ func newMatchFuncForOrSuffixes(orValues []string) (reMatch func(b []byte) bool,
|
|||
}
|
||||
|
||||
// getOptimizedReMatchFunc tries returning optimized function for matching the given expr.
|
||||
// '.*'
|
||||
// '.+'
|
||||
// 'literal.*'
|
||||
// 'literal.+'
|
||||
// '.*literal'
|
||||
// '.+literal
|
||||
// '.*literal.*'
|
||||
// '.*literal.+'
|
||||
// '.+literal.*'
|
||||
// '.+literal.+'
|
||||
//
|
||||
// '.*'
|
||||
// '.+'
|
||||
// 'literal.*'
|
||||
// 'literal.+'
|
||||
// '.*literal'
|
||||
// '.+literal'
|
||||
// '.*literal.*'
|
||||
// '.*literal.+'
|
||||
// '.+literal.*'
|
||||
// '.+literal.+'
|
||||
//
|
||||
// It returns reMatch if it cannot find optimized function.
|
||||
//
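The idea behind these optimizations can be sketched as follows; this is a simplified illustration, not the actual matcher code. For a pattern of the shape `literal.*`, a plain prefix check yields the same answer as the regexp engine at a fraction of the cost:

```go
package main

import (
	"bytes"
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^node_cpu.*`)
	prefix := []byte("node_cpu")
	b := []byte("node_cpu_seconds_total")

	viaRegexp := re.Match(b)
	viaPrefix := bytes.HasPrefix(b, prefix) // equivalent for this pattern shape
	fmt.Println(viaRegexp, viaPrefix)       // true true
}
```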
|
||||
|
|
|
@ -9,13 +9,13 @@ fi
|
|||
# Map to Debian architecture
|
||||
if [[ "$ARCH" == "amd64" ]]; then
|
||||
DEB_ARCH=amd64
|
||||
EXENAME_SRC="victoria-metrics-prod"
|
||||
EXENAME_SRC="victoria-metrics-linux-amd64-prod"
|
||||
elif [[ "$ARCH" == "arm64" ]]; then
|
||||
DEB_ARCH=arm64
|
||||
EXENAME_SRC="victoria-metrics-arm64-prod"
|
||||
EXENAME_SRC="victoria-metrics-linux-arm64-prod"
|
||||
elif [[ "$ARCH" == "arm" ]]; then
|
||||
DEB_ARCH=armhf
|
||||
EXENAME_SRC="victoria-metrics-arm-prod"
|
||||
EXENAME_SRC="victoria-metrics-linux-arm-prod"
|
||||
else
|
||||
echo "*** Unknown arch $ARCH"
|
||||
exit 1
|
||||
|
|
|
@ -15,10 +15,10 @@ fi
|
|||
# Map to Debian architecture
|
||||
if [[ "$ARCH" == "amd64" ]]; then
|
||||
RPM_ARCH=x86_64
|
||||
EXENAME_SRC="victoria-metrics-prod"
|
||||
EXENAME_SRC="victoria-metrics-linux-amd64-prod"
|
||||
elif [[ "$ARCH" == "arm64" ]]; then
|
||||
RPM_ARCH=aarch64
|
||||
EXENAME_SRC="victoria-metrics-arm64-prod"
|
||||
EXENAME_SRC="victoria-metrics-linux-arm64-prod"
|
||||
else
|
||||
echo "*** Unknown arch $ARCH"
|
||||
exit 1
|
||||
|
|
283
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
283
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
|
@ -800,18 +800,33 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "sa-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
}: endpoint{},
|
||||
|
@ -2005,6 +2020,76 @@ var awsPartition = partition{
|
|||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"appconfig": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "af-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "sa-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"appconfigdata": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
@ -2578,6 +2663,9 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
}: endpoint{},
|
||||
|
@ -6258,6 +6346,76 @@ var awsPartition = partition{
|
|||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"dlm": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "af-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "sa-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"dms": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
@ -12196,6 +12354,9 @@ var awsPartition = partition{
|
|||
},
|
||||
"kinesisvideo": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "af-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-east-1",
|
||||
}: endpoint{},
|
||||
|
@ -15131,6 +15292,14 @@ var awsPartition = partition{
|
|||
},
|
||||
"oidc": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "ap-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "oidc.ap-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-east-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-1",
|
||||
}: endpoint{
|
||||
|
@ -15235,6 +15404,14 @@ var awsPartition = partition{
|
|||
Region: "eu-west-3",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{
|
||||
Hostname: "oidc.me-south-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "me-south-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "sa-east-1",
|
||||
}: endpoint{
|
||||
|
@ -15840,6 +16017,14 @@ var awsPartition = partition{
|
|||
},
|
||||
"portal.sso": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "ap-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "portal.sso.ap-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-east-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-1",
|
||||
}: endpoint{
|
||||
|
@ -15944,6 +16129,14 @@ var awsPartition = partition{
|
|||
Region: "eu-west-3",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{
|
||||
Hostname: "portal.sso.me-south-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "me-south-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "sa-east-1",
|
||||
}: endpoint{
|
||||
|
@ -24113,6 +24306,16 @@ var awscnPartition = partition{
|
|||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"appconfig": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "cn-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "cn-northwest-1",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"appconfigdata": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
@ -24455,6 +24658,16 @@ var awscnPartition = partition{
|
|||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"dlm": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "cn-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "cn-northwest-1",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"dms": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
@ -26073,6 +26286,46 @@ var awsusgovPartition = partition{
|
|||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"appconfig": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "fips-us-gov-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "appconfig.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-gov-west-1",
|
||||
}: endpoint{
|
||||
Hostname: "appconfig.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "appconfig.us-gov-east-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "appconfig.us-gov-west-1.amazonaws.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
"appconfigdata": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
@ -26899,6 +27152,16 @@ var awsusgovPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"dlm": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"dms": service{
|
||||
Defaults: endpointDefaults{
|
||||
defaultKey{}: endpoint{},
|
||||
|
@ -30649,6 +30912,16 @@ var awsisoPartition = partition{
|
|||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"appconfig": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "us-iso-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-iso-west-1",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"appconfigdata": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
@ -30946,6 +31219,9 @@ var awsisoPartition = partition{
|
|||
endpointKey{
|
||||
Region: "us-iso-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-iso-west-1",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"events": service{
|
||||
|
@ -31393,6 +31669,13 @@ var awsisobPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"appconfig": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "us-isob-east-1",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"application-autoscaling": service{
|
||||
Defaults: endpointDefaults{
|
||||
defaultKey{}: endpoint{
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
|
@ -5,4 +5,4 @@ package aws
|
|||
const SDKName = "aws-sdk-go"
|
||||
|
||||
// SDKVersion is the version of this SDK
|
||||
const SDKVersion = "1.44.51"
|
||||
const SDKVersion = "1.44.53"
|
||||
|
|
9
vendor/github.com/klauspost/compress/README.md
generated
vendored
9
vendor/github.com/klauspost/compress/README.md
generated
vendored
|
@ -17,6 +17,15 @@ This package provides various compression algorithms.
|
|||
|
||||
# changelog
|
||||
|
||||
* June 29, 2022 (v1.15.7)
|
||||
|
||||
* s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
|
||||
* zip: Merge upstream https://github.com/klauspost/compress/pull/631
|
||||
* zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
|
||||
* zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
|
||||
* flate: Faster histograms https://github.com/klauspost/compress/pull/620
|
||||
* deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
|
||||
|
||||
* June 3, 2022 (v1.15.6)
|
||||
* s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
|
||||
* s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
|
||||
|
|
4
vendor/github.com/klauspost/compress/flate/stateless.go
generated
vendored
4
vendor/github.com/klauspost/compress/flate/stateless.go
generated
vendored
|
@ -59,9 +59,9 @@ var bitWriterPool = sync.Pool{
|
|||
},
|
||||
}
|
||||
|
||||
// StatelessDeflate allows to compress directly to a Writer without retaining state.
|
||||
// StatelessDeflate allows compressing directly to a Writer without retaining state.
|
||||
// When returning everything will be flushed.
|
||||
// Up to 8KB of an optional dictionary can be given which is presumed to presumed to precede the block.
|
||||
// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
|
||||
// Longer dictionaries will be truncated and will still produce valid output.
|
||||
// Sending nil dictionary is perfectly fine.
|
||||
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
|
||||
|
|
66
vendor/github.com/klauspost/compress/gzip/gunzip.go
generated
vendored
66
vendor/github.com/klauspost/compress/gzip/gunzip.go
generated
vendored
|
@ -252,42 +252,40 @@ func (z *Reader) Read(p []byte) (n int, err error) {
|
|||
return 0, z.err
|
||||
}
|
||||
|
||||
n, z.err = z.decompressor.Read(p)
|
||||
z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
|
||||
z.size += uint32(n)
|
||||
if z.err != io.EOF {
|
||||
// In the normal case we return here.
|
||||
return n, z.err
|
||||
for n == 0 {
|
||||
n, z.err = z.decompressor.Read(p)
|
||||
z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
|
||||
z.size += uint32(n)
|
||||
if z.err != io.EOF {
|
||||
// In the normal case we return here.
|
||||
return n, z.err
|
||||
}
|
||||
|
||||
// Finished file; check checksum and size.
|
||||
if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
|
||||
z.err = noEOF(err)
|
||||
return n, z.err
|
||||
}
|
||||
digest := le.Uint32(z.buf[:4])
|
||||
size := le.Uint32(z.buf[4:8])
|
||||
if digest != z.digest || size != z.size {
|
||||
z.err = ErrChecksum
|
||||
return n, z.err
|
||||
}
|
||||
z.digest, z.size = 0, 0
|
||||
|
||||
// File is ok; check if there is another.
|
||||
if !z.multistream {
|
||||
return n, io.EOF
|
||||
}
|
||||
z.err = nil // Remove io.EOF
|
||||
|
||||
if _, z.err = z.readHeader(); z.err != nil {
|
||||
return n, z.err
|
||||
}
|
||||
}
|
||||
|
||||
// Finished file; check checksum and size.
|
||||
if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
|
||||
z.err = noEOF(err)
|
||||
return n, z.err
|
||||
}
|
||||
digest := le.Uint32(z.buf[:4])
|
||||
size := le.Uint32(z.buf[4:8])
|
||||
if digest != z.digest || size != z.size {
|
||||
z.err = ErrChecksum
|
||||
return n, z.err
|
||||
}
|
||||
z.digest, z.size = 0, 0
|
||||
|
||||
// File is ok; check if there is another.
|
||||
if !z.multistream {
|
||||
return n, io.EOF
|
||||
}
|
||||
z.err = nil // Remove io.EOF
|
||||
|
||||
if _, z.err = z.readHeader(); z.err != nil {
|
||||
return n, z.err
|
||||
}
|
||||
|
||||
// Read from next file, if necessary.
|
||||
if n > 0 {
|
||||
return n, nil
|
||||
}
|
||||
return z.Read(p)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Support the io.WriteTo interface for io.Copy and friends.
|
||||
|
|
10
vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
generated
vendored
10
vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
generated
vendored
|
@ -27,10 +27,7 @@ func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
|
|||
const fallback8BitSize = 800
|
||||
|
||||
type decompress4xContext struct {
|
||||
pbr0 *bitReaderShifted
|
||||
pbr1 *bitReaderShifted
|
||||
pbr2 *bitReaderShifted
|
||||
pbr3 *bitReaderShifted
|
||||
pbr *[4]bitReaderShifted
|
||||
peekBits uint8
|
||||
out *byte
|
||||
dstEvery int
|
||||
|
@ -89,10 +86,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
|
|||
|
||||
if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
|
||||
ctx := decompress4xContext{
|
||||
pbr0: &br[0],
|
||||
pbr1: &br[1],
|
||||
pbr2: &br[2],
|
||||
pbr3: &br[3],
|
||||
pbr: &br,
|
||||
peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
|
||||
out: &out[0],
|
||||
dstEvery: dstEvery,
|
||||
|
|
666
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
generated
vendored
666
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
generated
vendored
|
@ -4,45 +4,40 @@
|
|||
// +build amd64,!appengine,!noasm,gc
|
||||
|
||||
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
|
||||
TEXT ·decompress4x_main_loop_amd64(SB), $8-8
|
||||
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
|
||||
XORQ DX, DX
|
||||
|
||||
// Preload values
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVBQZX 32(AX), SI
|
||||
MOVQ 40(AX), DI
|
||||
MOVQ DI, BX
|
||||
MOVQ 72(AX), CX
|
||||
MOVQ CX, (SP)
|
||||
MOVQ 48(AX), R8
|
||||
MOVQ 56(AX), R9
|
||||
MOVQ (AX), R10
|
||||
MOVQ 8(AX), R11
|
||||
MOVQ 16(AX), R12
|
||||
MOVQ 24(AX), R13
|
||||
MOVBQZX 8(AX), DI
|
||||
MOVQ 16(AX), SI
|
||||
MOVQ 48(AX), BX
|
||||
MOVQ 24(AX), R9
|
||||
MOVQ 32(AX), R10
|
||||
MOVQ (AX), R11
|
||||
|
||||
// Main loop
|
||||
main_loop:
|
||||
MOVQ BX, DI
|
||||
CMPQ DI, (SP)
|
||||
MOVQ SI, R8
|
||||
CMPQ R8, BX
|
||||
SETGE DL
|
||||
|
||||
// br0.fillFast32()
|
||||
MOVQ 32(R10), R14
|
||||
MOVBQZX 40(R10), R15
|
||||
CMPQ R15, $0x20
|
||||
MOVQ 32(R11), R12
|
||||
MOVBQZX 40(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
JBE skip_fill0
|
||||
MOVQ 24(R10), AX
|
||||
SUBQ $0x20, R15
|
||||
MOVQ 24(R11), AX
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, AX
|
||||
MOVQ (R10), BP
|
||||
MOVQ (R11), R14
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (AX)(BP*1), BP
|
||||
MOVQ R15, CX
|
||||
SHLQ CL, BP
|
||||
MOVQ AX, 24(R10)
|
||||
ORQ BP, R14
|
||||
MOVL (AX)(R14*1), R14
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ AX, 24(R11)
|
||||
ORQ R14, R12
|
||||
|
||||
// exhausted = exhausted || (br0.off < 4)
|
||||
CMPQ AX, $0x04
|
||||
|
@ -51,57 +46,57 @@ main_loop:
|
|||
|
||||
skip_fill0:
|
||||
// val0 := br0.peekTopBits(peekBits)
|
||||
MOVQ R14, BP
|
||||
MOVQ SI, CX
|
||||
SHRQ CL, BP
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R9)(BP*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br0.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R14
|
||||
ADDB CL, R15
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val1 := br0.peekTopBits(peekBits)
|
||||
MOVQ SI, CX
|
||||
MOVQ R14, BP
|
||||
SHRQ CL, BP
|
||||
MOVQ DI, CX
|
||||
MOVQ R12, R14
|
||||
SHRQ CL, R14
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW (R9)(BP*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br0.advance(uint8(v1.entry))
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R14
|
||||
ADDB CL, R15
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// these two writes get coalesced
|
||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
MOVW AX, (DI)
|
||||
MOVW AX, (R8)
|
||||
|
||||
// update the bitrader reader structure
|
||||
MOVQ R14, 32(R10)
|
||||
MOVB R15, 40(R10)
|
||||
ADDQ R8, DI
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 32(R11)
|
||||
MOVB R13, 40(R11)
|
||||
ADDQ R9, R8
|
||||
|
||||
// br1.fillFast32()
|
||||
MOVQ 32(R11), R14
|
||||
MOVBQZX 40(R11), R15
|
||||
CMPQ R15, $0x20
|
||||
MOVQ 80(R11), R12
|
||||
MOVBQZX 88(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
JBE skip_fill1
|
||||
MOVQ 24(R11), AX
|
||||
SUBQ $0x20, R15
|
||||
MOVQ 72(R11), AX
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, AX
|
||||
MOVQ (R11), BP
|
||||
MOVQ 48(R11), R14
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (AX)(BP*1), BP
|
||||
MOVQ R15, CX
|
||||
SHLQ CL, BP
|
||||
MOVQ AX, 24(R11)
|
||||
ORQ BP, R14
|
||||
MOVL (AX)(R14*1), R14
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ AX, 72(R11)
|
||||
ORQ R14, R12
|
||||
|
||||
// exhausted = exhausted || (br1.off < 4)
|
||||
CMPQ AX, $0x04
|
||||
|
@ -110,57 +105,57 @@ skip_fill0:
|
|||
|
||||
skip_fill1:
|
||||
// val0 := br1.peekTopBits(peekBits)
|
||||
MOVQ R14, BP
|
||||
MOVQ SI, CX
|
||||
SHRQ CL, BP
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R9)(BP*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br1.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R14
|
||||
ADDB CL, R15
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val1 := br1.peekTopBits(peekBits)
|
||||
MOVQ SI, CX
|
||||
MOVQ R14, BP
|
||||
SHRQ CL, BP
|
||||
MOVQ DI, CX
|
||||
MOVQ R12, R14
|
||||
SHRQ CL, R14
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW (R9)(BP*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br1.advance(uint8(v1.entry))
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R14
|
||||
ADDB CL, R15
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// these two writes get coalesced
|
||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
MOVW AX, (DI)
|
||||
MOVW AX, (R8)
|
||||
|
||||
// update the bitrader reader structure
|
||||
MOVQ R14, 32(R11)
|
||||
MOVB R15, 40(R11)
|
||||
ADDQ R8, DI
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 80(R11)
|
||||
MOVB R13, 88(R11)
|
||||
ADDQ R9, R8
|
||||
|
||||
// br2.fillFast32()
|
||||
MOVQ 32(R12), R14
|
||||
MOVBQZX 40(R12), R15
|
||||
CMPQ R15, $0x20
|
||||
MOVQ 128(R11), R12
|
||||
MOVBQZX 136(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
JBE skip_fill2
|
||||
MOVQ 24(R12), AX
|
||||
SUBQ $0x20, R15
|
||||
MOVQ 120(R11), AX
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, AX
|
||||
MOVQ (R12), BP
|
||||
MOVQ 96(R11), R14
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (AX)(BP*1), BP
|
||||
MOVQ R15, CX
|
||||
SHLQ CL, BP
|
||||
MOVQ AX, 24(R12)
|
||||
ORQ BP, R14
|
||||
MOVL (AX)(R14*1), R14
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ AX, 120(R11)
|
||||
ORQ R14, R12
|
||||
|
||||
// exhausted = exhausted || (br2.off < 4)
|
||||
CMPQ AX, $0x04
|
||||
|
@@ -169,57 +164,57 @@ skip_fill1:
|
|||
|
||||
skip_fill2:
|
||||
// val0 := br2.peekTopBits(peekBits)
|
||||
MOVQ R14, BP
|
||||
MOVQ SI, CX
|
||||
SHRQ CL, BP
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R9)(BP*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br2.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R14
|
||||
ADDB CL, R15
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val1 := br2.peekTopBits(peekBits)
|
||||
MOVQ SI, CX
|
||||
MOVQ R14, BP
|
||||
SHRQ CL, BP
|
||||
MOVQ DI, CX
|
||||
MOVQ R12, R14
|
||||
SHRQ CL, R14
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW (R9)(BP*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br2.advance(uint8(v1.entry))
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R14
|
||||
ADDB CL, R15
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// these two writes get coalesced
|
||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
MOVW AX, (DI)
|
||||
MOVW AX, (R8)
|
||||
|
||||
// update the bitrader reader structure
|
||||
MOVQ R14, 32(R12)
|
||||
MOVB R15, 40(R12)
|
||||
ADDQ R8, DI
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 128(R11)
|
||||
MOVB R13, 136(R11)
|
||||
ADDQ R9, R8
|
||||
|
||||
// br3.fillFast32()
|
||||
MOVQ 32(R13), R14
|
||||
MOVBQZX 40(R13), R15
|
||||
CMPQ R15, $0x20
|
||||
MOVQ 176(R11), R12
|
||||
MOVBQZX 184(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
JBE skip_fill3
|
||||
MOVQ 24(R13), AX
|
||||
SUBQ $0x20, R15
|
||||
MOVQ 168(R11), AX
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, AX
|
||||
MOVQ (R13), BP
|
||||
MOVQ 144(R11), R14
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (AX)(BP*1), BP
|
||||
MOVQ R15, CX
|
||||
SHLQ CL, BP
|
||||
MOVQ AX, 24(R13)
|
||||
ORQ BP, R14
|
||||
MOVL (AX)(R14*1), R14
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ AX, 168(R11)
|
||||
ORQ R14, R12
|
||||
|
||||
// exhausted = exhausted || (br3.off < 4)
|
||||
CMPQ AX, $0x04
|
||||
|
@@ -228,149 +223,142 @@ skip_fill2:
|
|||
|
||||
skip_fill3:
|
||||
// val0 := br3.peekTopBits(peekBits)
|
||||
MOVQ R14, BP
|
||||
MOVQ SI, CX
|
||||
SHRQ CL, BP
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R9)(BP*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br3.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R14
|
||||
ADDB CL, R15
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val1 := br3.peekTopBits(peekBits)
|
||||
MOVQ SI, CX
|
||||
MOVQ R14, BP
|
||||
SHRQ CL, BP
|
||||
MOVQ DI, CX
|
||||
MOVQ R12, R14
|
||||
SHRQ CL, R14
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW (R9)(BP*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br3.advance(uint8(v1.entry))
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R14
|
||||
ADDB CL, R15
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// these two writes get coalesced
|
||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
MOVW AX, (DI)
|
||||
MOVW AX, (R8)
|
||||
|
||||
// update the bitrader reader structure
|
||||
MOVQ R14, 32(R13)
|
||||
MOVB R15, 40(R13)
|
||||
ADDQ $0x02, BX
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 176(R11)
|
||||
MOVB R13, 184(R11)
|
||||
ADDQ $0x02, SI
|
||||
TESTB DL, DL
|
||||
JZ main_loop
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVQ 40(AX), CX
|
||||
MOVQ BX, DX
|
||||
SUBQ CX, DX
|
||||
SHLQ $0x02, DX
|
||||
MOVQ DX, 64(AX)
|
||||
SUBQ 16(AX), SI
|
||||
SHLQ $0x02, SI
|
||||
MOVQ SI, 40(AX)
|
||||
RET
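
For orientation, the per-stream step that the comments above spell out (peek the top peekBits bits, look the value up in a table of 16-bit entries, advance the stream by the entry's low byte, emit the entry's high byte) looks roughly like this in Go. This is a toy, self-contained sketch: the stream type, table layout, and field names are illustrative assumptions for this document, not the vendored package's own code.

package main

import "fmt"

// Toy bit stream: value holds the unread bits MSB-aligned, bitsRead counts
// bits already consumed (mirroring the "peek top bits / advance" comments).
type stream struct {
	value    uint64
	bitsRead uint8
}

// decodeOne peeks the top peekBits bits, indexes a table of 16-bit entries
// (low byte = code length in bits, high byte = decoded symbol), emits the
// symbol and consumes the code length.
func decodeOne(s *stream, table []uint16, peekBits uint8) byte {
	idx := s.value >> (64 - peekBits) // val := br.peekTopBits(peekBits)
	entry := table[idx]               // v := table[val&mask]
	nBits := uint8(entry)             // low byte: bits consumed by this code
	s.value <<= nBits                 // br.advance(uint8(v.entry))
	s.bitsRead += nBits
	return byte(entry >> 8) // out[...] = uint8(v.entry >> 8)
}

func main() {
	// Demo table for peekBits = 2: each 2-bit prefix decodes to 'A'..'D'
	// and consumes exactly 2 bits.
	table := make([]uint16, 4)
	for i := range table {
		table[i] = uint16('A'+i)<<8 | 2
	}
	s := &stream{value: 0x1B << 56} // top byte 0b00011011 decodes to A, B, C, D
	var out []byte
	for i := 0; i < 4; i++ {
		out = append(out, decodeOne(s, table, 2))
	}
	fmt.Printf("%s\n", out) // ABCD
}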
|
||||
|
||||
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
|
||||
TEXT ·decompress4x_8b_main_loop_amd64(SB), $16-8
|
||||
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
|
||||
XORQ DX, DX
|
||||
|
||||
// Preload values
|
||||
MOVQ ctx+0(FP), CX
|
||||
MOVBQZX 32(CX), BX
|
||||
MOVQ 40(CX), SI
|
||||
MOVQ SI, (SP)
|
||||
MOVQ 72(CX), DX
|
||||
MOVQ DX, 8(SP)
|
||||
MOVQ 48(CX), DI
|
||||
MOVQ 56(CX), R8
|
||||
MOVQ (CX), R9
|
||||
MOVQ 8(CX), R10
|
||||
MOVQ 16(CX), R11
|
||||
MOVQ 24(CX), R12
|
||||
MOVBQZX 8(CX), DI
|
||||
MOVQ 16(CX), BX
|
||||
MOVQ 48(CX), SI
|
||||
MOVQ 24(CX), R9
|
||||
MOVQ 32(CX), R10
|
||||
MOVQ (CX), R11
|
||||
|
||||
// Main loop
|
||||
main_loop:
|
||||
MOVQ (SP), SI
|
||||
CMPQ SI, 8(SP)
|
||||
MOVQ BX, R8
|
||||
CMPQ R8, SI
|
||||
SETGE DL
|
||||
|
||||
// br1000.fillFast32()
|
||||
MOVQ 32(R9), R13
|
||||
MOVBQZX 40(R9), R14
|
||||
CMPQ R14, $0x20
|
||||
JBE skip_fill1000
|
||||
MOVQ 24(R9), R15
|
||||
SUBQ $0x20, R14
|
||||
SUBQ $0x04, R15
|
||||
MOVQ (R9), BP
|
||||
// br0.fillFast32()
|
||||
MOVQ 32(R11), R12
|
||||
MOVBQZX 40(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
JBE skip_fill0
|
||||
MOVQ 24(R11), R14
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, R14
|
||||
MOVQ (R11), R15
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (R15)(BP*1), BP
|
||||
MOVQ R14, CX
|
||||
SHLQ CL, BP
|
||||
MOVQ R15, 24(R9)
|
||||
ORQ BP, R13
|
||||
MOVL (R14)(R15*1), R15
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R15
|
||||
MOVQ R14, 24(R11)
|
||||
ORQ R15, R12
|
||||
|
||||
// exhausted = exhausted || (br1000.off < 4)
|
||||
CMPQ R15, $0x04
|
||||
// exhausted = exhausted || (br0.off < 4)
|
||||
CMPQ R14, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
|
||||
skip_fill1000:
|
||||
skip_fill0:
|
||||
// val0 := br0.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br0.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val1 := br0.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v1 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br0.advance(uint8(v1.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
BSWAPL AX
|
||||
|
||||
// val2 := br0.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br0.advance(uint8(v2.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val3 := br0.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v3 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br0.advance(uint8(v3.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
BSWAPL AX
|
||||
|
||||
// these four writes get coalesced
|
||||
|
@@ -378,88 +366,88 @@ skip_fill1000:
|
|||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||
MOVL AX, (SI)
|
||||
MOVL AX, (R8)
|
||||
|
||||
// update the bitreader reader structure
|
||||
MOVQ R13, 32(R9)
|
||||
MOVB R14, 40(R9)
|
||||
ADDQ DI, SI
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 32(R11)
|
||||
MOVB R13, 40(R11)
|
||||
ADDQ R9, R8
|
||||
|
||||
// br1001.fillFast32()
|
||||
MOVQ 32(R10), R13
|
||||
MOVBQZX 40(R10), R14
|
||||
CMPQ R14, $0x20
|
||||
JBE skip_fill1001
|
||||
MOVQ 24(R10), R15
|
||||
SUBQ $0x20, R14
|
||||
SUBQ $0x04, R15
|
||||
MOVQ (R10), BP
|
||||
// br1.fillFast32()
|
||||
MOVQ 80(R11), R12
|
||||
MOVBQZX 88(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
JBE skip_fill1
|
||||
MOVQ 72(R11), R14
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, R14
|
||||
MOVQ 48(R11), R15
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (R15)(BP*1), BP
|
||||
MOVQ R14, CX
|
||||
SHLQ CL, BP
|
||||
MOVQ R15, 24(R10)
|
||||
ORQ BP, R13
|
||||
MOVL (R14)(R15*1), R15
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R15
|
||||
MOVQ R14, 72(R11)
|
||||
ORQ R15, R12
|
||||
|
||||
// exhausted = exhausted || (br1001.off < 4)
|
||||
CMPQ R15, $0x04
|
||||
// exhausted = exhausted || (br1.off < 4)
|
||||
CMPQ R14, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
|
||||
skip_fill1001:
|
||||
skip_fill1:
|
||||
// val0 := br1.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br1.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val1 := br1.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v1 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br1.advance(uint8(v1.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
BSWAPL AX
|
||||
|
||||
// val2 := br1.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br1.advance(uint8(v2.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val3 := br1.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v3 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br1.advance(uint8(v3.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
BSWAPL AX
|
||||
|
||||
// these four writes get coalesced
|
||||
|
@@ -467,88 +455,88 @@ skip_fill1001:
|
|||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||
MOVL AX, (SI)
|
||||
MOVL AX, (R8)
|
||||
|
||||
// update the bitreader reader structure
|
||||
MOVQ R13, 32(R10)
|
||||
MOVB R14, 40(R10)
|
||||
ADDQ DI, SI
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 80(R11)
|
||||
MOVB R13, 88(R11)
|
||||
ADDQ R9, R8
|
||||
|
||||
// br1002.fillFast32()
|
||||
MOVQ 32(R11), R13
|
||||
MOVBQZX 40(R11), R14
|
||||
CMPQ R14, $0x20
|
||||
JBE skip_fill1002
|
||||
MOVQ 24(R11), R15
|
||||
SUBQ $0x20, R14
|
||||
SUBQ $0x04, R15
|
||||
MOVQ (R11), BP
|
||||
// br2.fillFast32()
|
||||
MOVQ 128(R11), R12
|
||||
MOVBQZX 136(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
JBE skip_fill2
|
||||
MOVQ 120(R11), R14
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, R14
|
||||
MOVQ 96(R11), R15
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (R15)(BP*1), BP
|
||||
MOVQ R14, CX
|
||||
SHLQ CL, BP
|
||||
MOVQ R15, 24(R11)
|
||||
ORQ BP, R13
|
||||
MOVL (R14)(R15*1), R15
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R15
|
||||
MOVQ R14, 120(R11)
|
||||
ORQ R15, R12
|
||||
|
||||
// exhausted = exhausted || (br1002.off < 4)
|
||||
CMPQ R15, $0x04
|
||||
// exhausted = exhausted || (br2.off < 4)
|
||||
CMPQ R14, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
|
||||
skip_fill1002:
|
||||
skip_fill2:
|
||||
// val0 := br2.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br2.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val1 := br2.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v1 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br2.advance(uint8(v1.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
BSWAPL AX
|
||||
|
||||
// val2 := br2.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br2.advance(uint8(v2.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val3 := br2.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v3 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br2.advance(uint8(v3.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
BSWAPL AX
|
||||
|
||||
// these four writes get coalesced
|
||||
|
@@ -556,88 +544,88 @@ skip_fill1002:
|
|||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||
MOVL AX, (SI)
|
||||
MOVL AX, (R8)
|
||||
|
||||
// update the bitreader reader structure
|
||||
MOVQ R13, 32(R11)
|
||||
MOVB R14, 40(R11)
|
||||
ADDQ DI, SI
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 128(R11)
|
||||
MOVB R13, 136(R11)
|
||||
ADDQ R9, R8
|
||||
|
||||
// br1003.fillFast32()
|
||||
MOVQ 32(R12), R13
|
||||
MOVBQZX 40(R12), R14
|
||||
CMPQ R14, $0x20
|
||||
JBE skip_fill1003
|
||||
MOVQ 24(R12), R15
|
||||
SUBQ $0x20, R14
|
||||
SUBQ $0x04, R15
|
||||
MOVQ (R12), BP
|
||||
// br3.fillFast32()
|
||||
MOVQ 176(R11), R12
|
||||
MOVBQZX 184(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
JBE skip_fill3
|
||||
MOVQ 168(R11), R14
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, R14
|
||||
MOVQ 144(R11), R15
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (R15)(BP*1), BP
|
||||
MOVQ R14, CX
|
||||
SHLQ CL, BP
|
||||
MOVQ R15, 24(R12)
|
||||
ORQ BP, R13
|
||||
MOVL (R14)(R15*1), R15
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R15
|
||||
MOVQ R14, 168(R11)
|
||||
ORQ R15, R12
|
||||
|
||||
// exhausted = exhausted || (br1003.off < 4)
|
||||
CMPQ R15, $0x04
|
||||
// exhausted = exhausted || (br3.off < 4)
|
||||
CMPQ R14, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
|
||||
skip_fill1003:
|
||||
skip_fill3:
|
||||
// val0 := br3.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br3.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val1 := br3.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v1 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br3.advance(uint8(v1.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
BSWAPL AX
|
||||
|
||||
// val2 := br3.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br3.advance(uint8(v2.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
|
||||
// val3 := br3.peekTopBits(peekBits)
|
||||
MOVQ R13, R15
|
||||
MOVQ BX, CX
|
||||
SHRQ CL, R15
|
||||
MOVQ R12, R14
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
|
||||
// v3 := table[val0&mask]
|
||||
MOVW (R8)(R15*2), CX
|
||||
MOVW (R10)(R14*2), CX
|
||||
|
||||
// br3.advance(uint8(v3.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R13
|
||||
ADDB CL, R14
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
BSWAPL AX
|
||||
|
||||
// these four writes get coalesced
|
||||
|
@@ -645,20 +633,18 @@ skip_fill1003:
|
|||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||
MOVL AX, (SI)
|
||||
MOVL AX, (R8)
|
||||
|
||||
// update the bitreader reader structure
|
||||
MOVQ R13, 32(R12)
|
||||
MOVB R14, 40(R12)
|
||||
ADDQ $0x04, (SP)
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 176(R11)
|
||||
MOVB R13, 184(R11)
|
||||
ADDQ $0x04, BX
|
||||
TESTB DL, DL
|
||||
JZ main_loop
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVQ 40(AX), CX
|
||||
MOVQ (SP), DX
|
||||
SUBQ CX, DX
|
||||
SHLQ $0x02, DX
|
||||
MOVQ DX, 64(AX)
|
||||
SUBQ 16(AX), BX
|
||||
SHLQ $0x02, BX
|
||||
MOVQ BX, 40(AX)
|
||||
RET
|
||||
|
||||
// func decompress1x_main_loop_amd64(ctx *decompress1xContext)
|
||||
|
@ -750,10 +736,8 @@ loop_condition:
|
|||
|
||||
// Update ctx structure
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVQ DX, CX
|
||||
MOVQ 16(AX), DX
|
||||
SUBQ DX, CX
|
||||
MOVQ CX, 40(AX)
|
||||
SUBQ 16(AX), DX
|
||||
MOVQ DX, 40(AX)
|
||||
MOVQ (AX), AX
|
||||
MOVQ R9, 24(AX)
|
||||
MOVQ R10, 32(AX)
|
||||
|
@ -847,10 +831,8 @@ loop_condition:
|
|||
|
||||
// Update ctx structure
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVQ DX, CX
|
||||
MOVQ 16(AX), DX
|
||||
SUBQ DX, CX
|
||||
MOVQ CX, 40(AX)
|
||||
SUBQ 16(AX), DX
|
||||
MOVQ DX, 40(AX)
|
||||
MOVQ (AX), AX
|
||||
MOVQ R9, 24(AX)
|
||||
MOVQ R10, 32(AX)
|
||||
|
|
1923 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s (generated, vendored)
File diff suppressed because it is too large
29 vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s (generated, vendored, new file)
@@ -0,0 +1,29 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build (darwin || freebsd || netbsd || openbsd) && gc
// +build darwin freebsd netbsd openbsd
// +build gc

#include "textflag.h"

// System call support for RISCV64 BSD

// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	JMP	syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	JMP	syscall·Syscall6(SB)

TEXT ·Syscall9(SB),NOSPLIT,$0-104
	JMP	syscall·Syscall9(SB)

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	JMP	syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	JMP	syscall·RawSyscall6(SB)
5 vendor/golang.org/x/sys/unix/mkall.sh (generated, vendored)
@@ -110,6 +110,11 @@ freebsd_arm64)
	mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
	mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
	;;
freebsd_riscv64)
	mkerrors="$mkerrors -m64"
	mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
	mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
	;;
netbsd_386)
	mkerrors="$mkerrors -m32"
	mksyscall="go run mksyscall.go -l32 -netbsd"
63 vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go (generated, vendored, new file)
@@ -0,0 +1,63 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build riscv64 && freebsd
// +build riscv64,freebsd

package unix

import (
	"syscall"
	"unsafe"
)

func setTimespec(sec, nsec int64) Timespec {
	return Timespec{Sec: sec, Nsec: nsec}
}

func setTimeval(sec, usec int64) Timeval {
	return Timeval{Sec: sec, Usec: usec}
}

func SetKevent(k *Kevent_t, fd, mode, flags int) {
	k.Ident = uint64(fd)
	k.Filter = int16(mode)
	k.Flags = uint16(flags)
}

func (iov *Iovec) SetLen(length int) {
	iov.Len = uint64(length)
}

func (msghdr *Msghdr) SetControllen(length int) {
	msghdr.Controllen = uint32(length)
}

func (msghdr *Msghdr) SetIovlen(length int) {
	msghdr.Iovlen = int32(length)
}

func (cmsg *Cmsghdr) SetLen(length int) {
	cmsg.Len = uint32(length)
}

func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
	var writtenOut uint64 = 0
	_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)

	written = int(writtenOut)

	if e1 != 0 {
		err = e1
	}
	return
}

func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)

func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
	ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
	return int(ioDesc.Len), err
}
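
On this target both Timespec fields are int64, so helpers like setTimespec above reduce to plain assignments. A small usage sketch with the package's public nanosecond conversion helpers (assuming golang.org/x/sys/unix built for a supported platform):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Round-trip a nanosecond count through Timespec.
	ts := unix.NsecToTimespec(1_500_000_000)
	fmt.Println(ts.Sec, ts.Nsec)         // 1 500000000
	fmt.Println(unix.TimespecToNsec(ts)) // 1500000000
}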
2148 vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go (generated, vendored, new file)
File diff suppressed because it is too large
1889 vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go (generated, vendored, new file)
File diff suppressed because it is too large
394 vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go (generated, vendored, new file)
|
@@ -0,0 +1,394 @@
|
|||
// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
//go:build riscv64 && freebsd
|
||||
// +build riscv64,freebsd
|
||||
|
||||
package unix
|
||||
|
||||
const (
|
||||
// SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
|
||||
SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void
|
||||
SYS_FORK = 2 // { int fork(void); }
|
||||
SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); }
|
||||
SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); }
|
||||
SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
|
||||
SYS_CLOSE = 6 // { int close(int fd); }
|
||||
SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); }
|
||||
SYS_LINK = 9 // { int link(char *path, char *link); }
|
||||
SYS_UNLINK = 10 // { int unlink(char *path); }
|
||||
SYS_CHDIR = 12 // { int chdir(char *path); }
|
||||
SYS_FCHDIR = 13 // { int fchdir(int fd); }
|
||||
SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
|
||||
SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
|
||||
SYS_BREAK = 17 // { caddr_t break(char *nsize); }
|
||||
SYS_GETPID = 20 // { pid_t getpid(void); }
|
||||
SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); }
|
||||
SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
|
||||
SYS_SETUID = 23 // { int setuid(uid_t uid); }
|
||||
SYS_GETUID = 24 // { uid_t getuid(void); }
|
||||
SYS_GETEUID = 25 // { uid_t geteuid(void); }
|
||||
SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); }
|
||||
SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); }
|
||||
SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); }
|
||||
SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); }
|
||||
SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); }
|
||||
SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); }
|
||||
SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); }
|
||||
SYS_ACCESS = 33 // { int access(char *path, int amode); }
|
||||
SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); }
|
||||
SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); }
|
||||
SYS_SYNC = 36 // { int sync(void); }
|
||||
SYS_KILL = 37 // { int kill(int pid, int signum); }
|
||||
SYS_GETPPID = 39 // { pid_t getppid(void); }
|
||||
SYS_DUP = 41 // { int dup(u_int fd); }
|
||||
SYS_GETEGID = 43 // { gid_t getegid(void); }
|
||||
SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); }
|
||||
SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); }
|
||||
SYS_GETGID = 47 // { gid_t getgid(void); }
|
||||
SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); }
|
||||
SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
|
||||
SYS_ACCT = 51 // { int acct(char *path); }
|
||||
SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); }
|
||||
SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); }
|
||||
SYS_REBOOT = 55 // { int reboot(int opt); }
|
||||
SYS_REVOKE = 56 // { int revoke(char *path); }
|
||||
SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
|
||||
SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); }
|
||||
SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); }
|
||||
SYS_UMASK = 60 // { int umask(int newmask); }
|
||||
SYS_CHROOT = 61 // { int chroot(char *path); }
|
||||
SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); }
|
||||
SYS_VFORK = 66 // { int vfork(void); }
|
||||
SYS_SBRK = 69 // { int sbrk(int incr); }
|
||||
SYS_SSTK = 70 // { int sstk(int incr); }
|
||||
SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
|
||||
SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); }
|
||||
SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); }
|
||||
SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); }
|
||||
SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); }
|
||||
SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); }
|
||||
SYS_GETPGRP = 81 // { int getpgrp(void); }
|
||||
SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
|
||||
SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); }
|
||||
SYS_SWAPON = 85 // { int swapon(char *name); }
|
||||
SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); }
|
||||
SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
|
||||
SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
|
||||
SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
|
||||
SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
|
||||
SYS_FSYNC = 95 // { int fsync(int fd); }
|
||||
SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); }
|
||||
SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); }
|
||||
SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); }
|
||||
SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
|
||||
SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); }
|
||||
SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); }
|
||||
SYS_LISTEN = 106 // { int listen(int s, int backlog); }
|
||||
SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); }
|
||||
SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); }
|
||||
SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); }
|
||||
SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); }
|
||||
SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); }
|
||||
SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); }
|
||||
SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
|
||||
SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
|
||||
SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
|
||||
SYS_SETREGID = 127 // { int setregid(int rgid, int egid); }
|
||||
SYS_RENAME = 128 // { int rename(char *from, char *to); }
|
||||
SYS_FLOCK = 131 // { int flock(int fd, int how); }
|
||||
SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
|
||||
SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); }
|
||||
SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
|
||||
SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); }
|
||||
SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
|
||||
SYS_RMDIR = 137 // { int rmdir(char *path); }
|
||||
SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); }
|
||||
SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); }
|
||||
SYS_SETSID = 147 // { int setsid(void); }
|
||||
SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); }
|
||||
SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); }
|
||||
SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); }
|
||||
SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); }
|
||||
SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); }
|
||||
SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
|
||||
SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); }
|
||||
SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); }
|
||||
SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); }
|
||||
SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); }
|
||||
SYS_SETFIB = 175 // { int setfib(int fibnum); }
|
||||
SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
|
||||
SYS_SETGID = 181 // { int setgid(gid_t gid); }
|
||||
SYS_SETEGID = 182 // { int setegid(gid_t egid); }
|
||||
SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
|
||||
SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
|
||||
SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
|
||||
SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int
|
||||
SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int
|
||||
SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int
|
||||
SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
|
||||
SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
|
||||
SYS_UNDELETE = 205 // { int undelete(char *path); }
|
||||
SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
|
||||
SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
|
||||
SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); }
|
||||
SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); }
|
||||
SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); }
|
||||
SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); }
|
||||
SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
|
||||
SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
|
||||
SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); }
|
||||
SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); }
|
||||
SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); }
|
||||
SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); }
|
||||
SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); }
|
||||
SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); }
|
||||
SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); }
|
||||
SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); }
|
||||
SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); }
|
||||
SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); }
|
||||
SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); }
|
||||
SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
|
||||
SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
|
||||
SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); }
|
||||
SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); }
|
||||
SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); }
|
||||
SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); }
|
||||
SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
|
||||
SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
|
||||
SYS_RFORK = 251 // { int rfork(int flags); }
|
||||
SYS_ISSETUGID = 253 // { int issetugid(void); }
|
||||
SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
|
||||
SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); }
|
||||
SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); }
|
||||
SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); }
|
||||
SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
|
||||
SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
|
||||
SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
|
||||
SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
|
||||
SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
|
||||
SYS_MODNEXT = 300 // { int modnext(int modid); }
|
||||
SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); }
|
||||
SYS_MODFNEXT = 302 // { int modfnext(int modid); }
|
||||
SYS_MODFIND = 303 // { int modfind(const char *name); }
|
||||
SYS_KLDLOAD = 304 // { int kldload(const char *file); }
|
||||
SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
|
||||
SYS_KLDFIND = 306 // { int kldfind(const char *file); }
|
||||
SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
|
||||
SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); }
|
||||
SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
|
||||
SYS_GETSID = 310 // { int getsid(pid_t pid); }
|
||||
SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
|
||||
SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
|
||||
SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
|
||||
SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
|
||||
SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
|
||||
SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
|
||||
SYS_YIELD = 321 // { int yield(void); }
|
||||
SYS_MLOCKALL = 324 // { int mlockall(int how); }
|
||||
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
|
||||
SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); }
|
||||
SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
|
||||
SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
|
||||
SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
|
||||
SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); }
|
||||
SYS_SCHED_YIELD = 331 // { int sched_yield (void); }
|
||||
SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); }
|
||||
SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); }
|
||||
SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); }
|
||||
SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); }
|
||||
SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); }
|
||||
SYS_JAIL = 338 // { int jail(struct jail *jail); }
|
||||
SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); }
|
||||
SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); }
|
||||
SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); }
|
||||
SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec *timeout); }
|
||||
SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); }
|
||||
SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); }
|
||||
SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); }
|
||||
SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); }
|
||||
SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); }
|
||||
SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); }
|
||||
SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); }
|
||||
SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); }
|
||||
SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); }
|
||||
SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
|
||||
SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
|
||||
SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
|
||||
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
|
||||
SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); }
|
||||
SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
|
||||
SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
|
||||
SYS_KQUEUE = 362 // { int kqueue(void); }
|
||||
SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
|
||||
SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
|
||||
SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
|
||||
SYS___SETUGID = 374 // { int __setugid(int flag); }
|
||||
SYS_EACCESS = 376 // { int eaccess(char *path, int amode); }
|
||||
SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); }
|
||||
SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); }
|
||||
SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); }
|
||||
SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); }
|
||||
SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); }
|
||||
SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); }
|
||||
SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); }
|
||||
SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); }
|
||||
SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); }
|
||||
SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
|
||||
SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
|
||||
SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
|
||||
SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); }
|
||||
SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); }
|
||||
SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); }
|
||||
SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); }
|
||||
SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); }
|
||||
SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); }
|
||||
SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); }
|
||||
SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); }
|
||||
SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); }
|
||||
SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); }
|
||||
SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); }
|
||||
SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); }
|
||||
SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
|
||||
SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
|
||||
SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
|
||||
SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); }
|
||||
SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); }
|
||||
SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); }
|
||||
SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); }
|
||||
SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); }
|
||||
SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); }
|
||||
SYS_SWAPOFF = 424 // { int swapoff(const char *name); }
|
||||
SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); }
|
||||
SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); }
|
||||
SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); }
|
||||
SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); }
|
||||
SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); }
|
||||
SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); }
|
||||
SYS_THR_EXIT = 431 // { void thr_exit(long *state); }
|
||||
SYS_THR_SELF = 432 // { int thr_self(long *id); }
|
||||
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
|
||||
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
|
||||
SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
|
||||
SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
|
||||
SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
|
||||
SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); }
|
||||
SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); }
|
||||
SYS_THR_WAKE = 443 // { int thr_wake(long id); }
|
||||
SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); }
|
||||
SYS_AUDIT = 445 // { int audit(const void *record, u_int length); }
|
||||
SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); }
|
||||
SYS_GETAUID = 447 // { int getauid(uid_t *auid); }
|
||||
SYS_SETAUID = 448 // { int setauid(uid_t *auid); }
|
||||
SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); }
|
||||
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); }
|
||||
SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
|
||||
SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
|
||||
SYS_AUDITCTL = 453 // { int auditctl(char *path); }
|
||||
SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); }
|
||||
SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); }
|
||||
SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); }
|
||||
SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); }
|
||||
SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
|
||||
SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
|
||||
SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); }
|
||||
SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
|
||||
SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); }
|
||||
SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); }
|
||||
SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); }
|
||||
SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); }
|
||||
SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); }
|
||||
SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); }
|
||||
SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
|
||||
SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
|
||||
SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
|
||||
SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); }
|
||||
SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); }
|
||||
SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); }
|
||||
SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); }
|
||||
SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); }
|
||||
SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); }
|
||||
SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); }
|
||||
SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); }
|
||||
SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); }
|
||||
SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); }
|
||||
SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); }
|
||||
SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); }
|
||||
SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); }
|
||||
SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); }
|
||||
SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, int flag); }
|
||||
SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); }
|
||||
SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); }
|
||||
SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); }
|
||||
SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); }
|
||||
SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); }
|
||||
SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); }
|
||||
SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); }
|
||||
SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); }
|
||||
SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); }
|
||||
SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); }
|
||||
SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); }
|
||||
SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); }
|
||||
SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); }
|
||||
SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); }
|
||||
SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); }
|
||||
SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); }
|
||||
SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); }
|
||||
SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); }
|
||||
SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); }
|
||||
SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); }
|
||||
SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); }
|
||||
SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); }
SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); }
SYS_CAP_ENTER = 516 // { int cap_enter(void); }
SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }
SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); }
SYS_PDKILL = 519 // { int pdkill(int fd, int signum); }
SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); }
SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); }
SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); }
SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); }
SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); }
SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); }
SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); }
SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); }
SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long *cmds, size_t ncmds); }
SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); }
SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); }
SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); }
SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); }
SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); }
SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); }
SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); }
SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); }
SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); }
SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); }
SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); }
SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); }
SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); }
SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); }
SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); }
SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); }
SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); }
SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); }
SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); }
SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); }
SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); }
SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); }
SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); }
)
626 vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go generated vendored Normal file
@ -0,0 +1,626 @@
|
|||
// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
//go:build riscv64 && freebsd
|
||||
// +build riscv64,freebsd
|
||||
|
||||
package unix
|
||||
|
||||
const (
|
||||
SizeofPtr = 0x8
|
||||
SizeofShort = 0x2
|
||||
SizeofInt = 0x4
|
||||
SizeofLong = 0x8
|
||||
SizeofLongLong = 0x8
|
||||
)
|
||||
|
||||
type (
|
||||
_C_short int16
|
||||
_C_int int32
|
||||
_C_long int64
|
||||
_C_long_long int64
|
||||
)
|
||||
|
||||
type Timespec struct {
|
||||
Sec int64
|
||||
Nsec int64
|
||||
}
|
||||
|
||||
type Timeval struct {
|
||||
Sec int64
|
||||
Usec int64
|
||||
}
|
||||
|
||||
type Time_t int64
|
||||
|
||||
type Rusage struct {
|
||||
Utime Timeval
|
||||
Stime Timeval
|
||||
Maxrss int64
|
||||
Ixrss int64
|
||||
Idrss int64
|
||||
Isrss int64
|
||||
Minflt int64
|
||||
Majflt int64
|
||||
Nswap int64
|
||||
Inblock int64
|
||||
Oublock int64
|
||||
Msgsnd int64
|
||||
Msgrcv int64
|
||||
Nsignals int64
|
||||
Nvcsw int64
|
||||
Nivcsw int64
|
||||
}
|
||||
|
||||
type Rlimit struct {
|
||||
Cur int64
|
||||
Max int64
|
||||
}
|
||||
|
||||
type _Gid_t uint32
|
||||
|
||||
const (
|
||||
_statfsVersion = 0x20140518
|
||||
_dirblksiz = 0x400
|
||||
)
|
||||
|
||||
type Stat_t struct {
|
||||
Dev uint64
|
||||
Ino uint64
|
||||
Nlink uint64
|
||||
Mode uint16
|
||||
_0 int16
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
_1 int32
|
||||
Rdev uint64
|
||||
Atim Timespec
|
||||
Mtim Timespec
|
||||
Ctim Timespec
|
||||
Btim Timespec
|
||||
Size int64
|
||||
Blocks int64
|
||||
Blksize int32
|
||||
Flags uint32
|
||||
Gen uint64
|
||||
Spare [10]uint64
|
||||
}
|
||||
|
||||
type Statfs_t struct {
|
||||
Version uint32
|
||||
Type uint32
|
||||
Flags uint64
|
||||
Bsize uint64
|
||||
Iosize uint64
|
||||
Blocks uint64
|
||||
Bfree uint64
|
||||
Bavail int64
|
||||
Files uint64
|
||||
Ffree int64
|
||||
Syncwrites uint64
|
||||
Asyncwrites uint64
|
||||
Syncreads uint64
|
||||
Asyncreads uint64
|
||||
Spare [10]uint64
|
||||
Namemax uint32
|
||||
Owner uint32
|
||||
Fsid Fsid
|
||||
Charspare [80]int8
|
||||
Fstypename [16]byte
|
||||
Mntfromname [1024]byte
|
||||
Mntonname [1024]byte
|
||||
}
|
||||
|
||||
type Flock_t struct {
|
||||
Start int64
|
||||
Len int64
|
||||
Pid int32
|
||||
Type int16
|
||||
Whence int16
|
||||
Sysid int32
|
||||
_ [4]byte
|
||||
}
|
||||
|
||||
type Dirent struct {
|
||||
Fileno uint64
|
||||
Off int64
|
||||
Reclen uint16
|
||||
Type uint8
|
||||
Pad0 uint8
|
||||
Namlen uint16
|
||||
Pad1 uint16
|
||||
Name [256]int8
|
||||
}
|
||||
|
||||
type Fsid struct {
|
||||
Val [2]int32
|
||||
}
|
||||
|
||||
const (
|
||||
PathMax = 0x400
|
||||
)
|
||||
|
||||
const (
|
||||
FADV_NORMAL = 0x0
|
||||
FADV_RANDOM = 0x1
|
||||
FADV_SEQUENTIAL = 0x2
|
||||
FADV_WILLNEED = 0x3
|
||||
FADV_DONTNEED = 0x4
|
||||
FADV_NOREUSE = 0x5
|
||||
)
|
||||
|
||||
type RawSockaddrInet4 struct {
|
||||
Len uint8
|
||||
Family uint8
|
||||
Port uint16
|
||||
Addr [4]byte /* in_addr */
|
||||
Zero [8]int8
|
||||
}
|
||||
|
||||
type RawSockaddrInet6 struct {
|
||||
Len uint8
|
||||
Family uint8
|
||||
Port uint16
|
||||
Flowinfo uint32
|
||||
Addr [16]byte /* in6_addr */
|
||||
Scope_id uint32
|
||||
}
|
||||
|
||||
type RawSockaddrUnix struct {
|
||||
Len uint8
|
||||
Family uint8
|
||||
Path [104]int8
|
||||
}
|
||||
|
||||
type RawSockaddrDatalink struct {
|
||||
Len uint8
|
||||
Family uint8
|
||||
Index uint16
|
||||
Type uint8
|
||||
Nlen uint8
|
||||
Alen uint8
|
||||
Slen uint8
|
||||
Data [46]int8
|
||||
}
|
||||
|
||||
type RawSockaddr struct {
|
||||
Len uint8
|
||||
Family uint8
|
||||
Data [14]int8
|
||||
}
|
||||
|
||||
type RawSockaddrAny struct {
|
||||
Addr RawSockaddr
|
||||
Pad [92]int8
|
||||
}
|
||||
|
||||
type _Socklen uint32
|
||||
|
||||
type Xucred struct {
|
||||
Version uint32
|
||||
Uid uint32
|
||||
Ngroups int16
|
||||
Groups [16]uint32
|
||||
_ *byte
|
||||
}
|
||||
|
||||
type Linger struct {
|
||||
Onoff int32
|
||||
Linger int32
|
||||
}
|
||||
|
||||
type Iovec struct {
|
||||
Base *byte
|
||||
Len uint64
|
||||
}
|
||||
|
||||
type IPMreq struct {
|
||||
Multiaddr [4]byte /* in_addr */
|
||||
Interface [4]byte /* in_addr */
|
||||
}
|
||||
|
||||
type IPMreqn struct {
|
||||
Multiaddr [4]byte /* in_addr */
|
||||
Address [4]byte /* in_addr */
|
||||
Ifindex int32
|
||||
}
|
||||
|
||||
type IPv6Mreq struct {
|
||||
Multiaddr [16]byte /* in6_addr */
|
||||
Interface uint32
|
||||
}
|
||||
|
||||
type Msghdr struct {
|
||||
Name *byte
|
||||
Namelen uint32
|
||||
Iov *Iovec
|
||||
Iovlen int32
|
||||
Control *byte
|
||||
Controllen uint32
|
||||
Flags int32
|
||||
}
|
||||
|
||||
type Cmsghdr struct {
|
||||
Len uint32
|
||||
Level int32
|
||||
Type int32
|
||||
}
|
||||
|
||||
type Inet6Pktinfo struct {
|
||||
Addr [16]byte /* in6_addr */
|
||||
Ifindex uint32
|
||||
}
|
||||
|
||||
type IPv6MTUInfo struct {
|
||||
Addr RawSockaddrInet6
|
||||
Mtu uint32
|
||||
}
|
||||
|
||||
type ICMPv6Filter struct {
|
||||
Filt [8]uint32
|
||||
}
|
||||
|
||||
const (
|
||||
SizeofSockaddrInet4 = 0x10
|
||||
SizeofSockaddrInet6 = 0x1c
|
||||
SizeofSockaddrAny = 0x6c
|
||||
SizeofSockaddrUnix = 0x6a
|
||||
SizeofSockaddrDatalink = 0x36
|
||||
SizeofXucred = 0x58
|
||||
SizeofLinger = 0x8
|
||||
SizeofIovec = 0x10
|
||||
SizeofIPMreq = 0x8
|
||||
SizeofIPMreqn = 0xc
|
||||
SizeofIPv6Mreq = 0x14
|
||||
SizeofMsghdr = 0x30
|
||||
SizeofCmsghdr = 0xc
|
||||
SizeofInet6Pktinfo = 0x14
|
||||
SizeofIPv6MTUInfo = 0x20
|
||||
SizeofICMPv6Filter = 0x20
|
||||
)
|
||||
|
||||
const (
|
||||
PTRACE_TRACEME = 0x0
|
||||
PTRACE_CONT = 0x7
|
||||
PTRACE_KILL = 0x8
|
||||
)
|
||||
|
||||
type PtraceLwpInfoStruct struct {
|
||||
Lwpid int32
|
||||
Event int32
|
||||
Flags int32
|
||||
Sigmask Sigset_t
|
||||
Siglist Sigset_t
|
||||
Siginfo __Siginfo
|
||||
Tdname [20]int8
|
||||
Child_pid int32
|
||||
Syscall_code uint32
|
||||
Syscall_narg uint32
|
||||
}
|
||||
|
||||
type __Siginfo struct {
|
||||
Signo int32
|
||||
Errno int32
|
||||
Code int32
|
||||
Pid int32
|
||||
Uid uint32
|
||||
Status int32
|
||||
Addr *byte
|
||||
Value [8]byte
|
||||
_ [40]byte
|
||||
}
|
||||
|
||||
type Sigset_t struct {
|
||||
Val [4]uint32
|
||||
}
|
||||
|
||||
type Reg struct {
|
||||
Ra uint64
|
||||
Sp uint64
|
||||
Gp uint64
|
||||
Tp uint64
|
||||
T [7]uint64
|
||||
S [12]uint64
|
||||
A [8]uint64
|
||||
Sepc uint64
|
||||
Sstatus uint64
|
||||
}
|
||||
|
||||
type FpReg struct {
|
||||
X [32][2]uint64
|
||||
Fcsr uint64
|
||||
}
|
||||
|
||||
type FpExtendedPrecision struct{}
|
||||
|
||||
type PtraceIoDesc struct {
|
||||
Op int32
|
||||
Offs *byte
|
||||
Addr *byte
|
||||
Len uint64
|
||||
}
|
||||
|
||||
type Kevent_t struct {
|
||||
Ident uint64
|
||||
Filter int16
|
||||
Flags uint16
|
||||
Fflags uint32
|
||||
Data int64
|
||||
Udata *byte
|
||||
Ext [4]uint64
|
||||
}
|
||||
|
||||
type FdSet struct {
|
||||
Bits [16]uint64
|
||||
}
|
||||
|
||||
const (
|
||||
sizeofIfMsghdr = 0xa8
|
||||
SizeofIfMsghdr = 0xa8
|
||||
sizeofIfData = 0x98
|
||||
SizeofIfData = 0x98
|
||||
SizeofIfaMsghdr = 0x14
|
||||
SizeofIfmaMsghdr = 0x10
|
||||
SizeofIfAnnounceMsghdr = 0x18
|
||||
SizeofRtMsghdr = 0x98
|
||||
SizeofRtMetrics = 0x70
|
||||
)
|
||||
|
||||
type ifMsghdr struct {
|
||||
Msglen uint16
|
||||
Version uint8
|
||||
Type uint8
|
||||
Addrs int32
|
||||
Flags int32
|
||||
Index uint16
|
||||
_ uint16
|
||||
Data ifData
|
||||
}
|
||||
|
||||
type IfMsghdr struct {
|
||||
Msglen uint16
|
||||
Version uint8
|
||||
Type uint8
|
||||
Addrs int32
|
||||
Flags int32
|
||||
Index uint16
|
||||
Data IfData
|
||||
}
|
||||
|
||||
type ifData struct {
|
||||
Type uint8
|
||||
Physical uint8
|
||||
Addrlen uint8
|
||||
Hdrlen uint8
|
||||
Link_state uint8
|
||||
Vhid uint8
|
||||
Datalen uint16
|
||||
Mtu uint32
|
||||
Metric uint32
|
||||
Baudrate uint64
|
||||
Ipackets uint64
|
||||
Ierrors uint64
|
||||
Opackets uint64
|
||||
Oerrors uint64
|
||||
Collisions uint64
|
||||
Ibytes uint64
|
||||
Obytes uint64
|
||||
Imcasts uint64
|
||||
Omcasts uint64
|
||||
Iqdrops uint64
|
||||
Oqdrops uint64
|
||||
Noproto uint64
|
||||
Hwassist uint64
|
||||
_ [8]byte
|
||||
_ [16]byte
|
||||
}
|
||||
|
||||
type IfData struct {
|
||||
Type uint8
|
||||
Physical uint8
|
||||
Addrlen uint8
|
||||
Hdrlen uint8
|
||||
Link_state uint8
|
||||
Spare_char1 uint8
|
||||
Spare_char2 uint8
|
||||
Datalen uint8
|
||||
Mtu uint64
|
||||
Metric uint64
|
||||
Baudrate uint64
|
||||
Ipackets uint64
|
||||
Ierrors uint64
|
||||
Opackets uint64
|
||||
Oerrors uint64
|
||||
Collisions uint64
|
||||
Ibytes uint64
|
||||
Obytes uint64
|
||||
Imcasts uint64
|
||||
Omcasts uint64
|
||||
Iqdrops uint64
|
||||
Noproto uint64
|
||||
Hwassist uint64
|
||||
Epoch int64
|
||||
Lastchange Timeval
|
||||
}
|
||||
|
||||
type IfaMsghdr struct {
|
||||
Msglen uint16
|
||||
Version uint8
|
||||
Type uint8
|
||||
Addrs int32
|
||||
Flags int32
|
||||
Index uint16
|
||||
_ uint16
|
||||
Metric int32
|
||||
}
|
||||
|
||||
type IfmaMsghdr struct {
|
||||
Msglen uint16
|
||||
Version uint8
|
||||
Type uint8
|
||||
Addrs int32
|
||||
Flags int32
|
||||
Index uint16
|
||||
_ uint16
|
||||
}
|
||||
|
||||
type IfAnnounceMsghdr struct {
|
||||
Msglen uint16
|
||||
Version uint8
|
||||
Type uint8
|
||||
Index uint16
|
||||
Name [16]int8
|
||||
What uint16
|
||||
}
|
||||
|
||||
type RtMsghdr struct {
|
||||
Msglen uint16
|
||||
Version uint8
|
||||
Type uint8
|
||||
Index uint16
|
||||
_ uint16
|
||||
Flags int32
|
||||
Addrs int32
|
||||
Pid int32
|
||||
Seq int32
|
||||
Errno int32
|
||||
Fmask int32
|
||||
Inits uint64
|
||||
Rmx RtMetrics
|
||||
}
|
||||
|
||||
type RtMetrics struct {
|
||||
Locks uint64
|
||||
Mtu uint64
|
||||
Hopcount uint64
|
||||
Expire uint64
|
||||
Recvpipe uint64
|
||||
Sendpipe uint64
|
||||
Ssthresh uint64
|
||||
Rtt uint64
|
||||
Rttvar uint64
|
||||
Pksent uint64
|
||||
Weight uint64
|
||||
Nhidx uint64
|
||||
Filler [2]uint64
|
||||
}
|
||||
|
||||
const (
|
||||
SizeofBpfVersion = 0x4
|
||||
SizeofBpfStat = 0x8
|
||||
SizeofBpfZbuf = 0x18
|
||||
SizeofBpfProgram = 0x10
|
||||
SizeofBpfInsn = 0x8
|
||||
SizeofBpfHdr = 0x20
|
||||
SizeofBpfZbufHeader = 0x20
|
||||
)
|
||||
|
||||
type BpfVersion struct {
|
||||
Major uint16
|
||||
Minor uint16
|
||||
}
|
||||
|
||||
type BpfStat struct {
|
||||
Recv uint32
|
||||
Drop uint32
|
||||
}
|
||||
|
||||
type BpfZbuf struct {
|
||||
Bufa *byte
|
||||
Bufb *byte
|
||||
Buflen uint64
|
||||
}
|
||||
|
||||
type BpfProgram struct {
|
||||
Len uint32
|
||||
Insns *BpfInsn
|
||||
}
|
||||
|
||||
type BpfInsn struct {
|
||||
Code uint16
|
||||
Jt uint8
|
||||
Jf uint8
|
||||
K uint32
|
||||
}
|
||||
|
||||
type BpfHdr struct {
|
||||
Tstamp Timeval
|
||||
Caplen uint32
|
||||
Datalen uint32
|
||||
Hdrlen uint16
|
||||
_ [6]byte
|
||||
}
|
||||
|
||||
type BpfZbufHeader struct {
|
||||
Kernel_gen uint32
|
||||
Kernel_len uint32
|
||||
User_gen uint32
|
||||
_ [5]uint32
|
||||
}
|
||||
|
||||
type Termios struct {
|
||||
Iflag uint32
|
||||
Oflag uint32
|
||||
Cflag uint32
|
||||
Lflag uint32
|
||||
Cc [20]uint8
|
||||
Ispeed uint32
|
||||
Ospeed uint32
|
||||
}
|
||||
|
||||
type Winsize struct {
|
||||
Row uint16
|
||||
Col uint16
|
||||
Xpixel uint16
|
||||
Ypixel uint16
|
||||
}
|
||||
|
||||
const (
|
||||
AT_FDCWD = -0x64
|
||||
AT_EACCESS = 0x100
|
||||
AT_SYMLINK_NOFOLLOW = 0x200
|
||||
AT_SYMLINK_FOLLOW = 0x400
|
||||
AT_REMOVEDIR = 0x800
|
||||
)
|
||||
|
||||
type PollFd struct {
|
||||
Fd int32
|
||||
Events int16
|
||||
Revents int16
|
||||
}
|
||||
|
||||
const (
|
||||
POLLERR = 0x8
|
||||
POLLHUP = 0x10
|
||||
POLLIN = 0x1
|
||||
POLLINIGNEOF = 0x2000
|
||||
POLLNVAL = 0x20
|
||||
POLLOUT = 0x4
|
||||
POLLPRI = 0x2
|
||||
POLLRDBAND = 0x80
|
||||
POLLRDNORM = 0x40
|
||||
POLLWRBAND = 0x100
|
||||
POLLWRNORM = 0x4
|
||||
)
|
||||
|
||||
type CapRights struct {
|
||||
Rights [2]uint64
|
||||
}
|
||||
|
||||
type Utsname struct {
|
||||
Sysname [256]byte
|
||||
Nodename [256]byte
|
||||
Release [256]byte
|
||||
Version [256]byte
|
||||
Machine [256]byte
|
||||
}
|
||||
|
||||
const SizeofClockinfo = 0x14
|
||||
|
||||
type Clockinfo struct {
|
||||
Hz int32
|
||||
Tick int32
|
||||
Spare int32
|
||||
Stathz int32
|
||||
Profhz int32
|
||||
}
|
2 vendor/google.golang.org/api/internal/version.go generated vendored
@ -5,4 +5,4 @@
package internal

// Version is the current tagged release of the library.
const Version = "0.86.0"
const Version = "0.87.0"
6 vendor/google.golang.org/api/storage/v1/storage-api.json generated vendored
@ -26,7 +26,7 @@
"description": "Stores and retrieves potentially large, immutable data objects.",
"discoveryVersion": "v1",
"documentationLink": "https://developers.google.com/storage/docs/json_api/",
"etag": "\"3130353432333136333236323133333532323835\"",
"etag": "\"3134363638303431303535363634343235383633\"",
"icons": {
"x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
"x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
@ -3005,7 +3005,7 @@
}
}
},
"revision": "20220608",
"revision": "20220705",
"rootUrl": "https://storage.googleapis.com/",
"schemas": {
"Bucket": {
@ -3995,7 +3995,7 @@
"type": "string"
},
"updated": {
"description": "The modification time of the object metadata in RFC 3339 format.",
"description": "The modification time of the object metadata in RFC 3339 format. Set initially to object creation time and then updated whenever any metadata of the object changes. This includes changes made by a requester, such as modifying custom metadata, as well as changes made by Cloud Storage on behalf of a requester, such as changing the storage class based on an Object Lifecycle Configuration.",
"format": "date-time",
"type": "string"
}
7 vendor/google.golang.org/api/storage/v1/storage-gen.go generated vendored
@ -1889,7 +1889,12 @@ type Object struct {
TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"`

// Updated: The modification time of the object metadata in RFC 3339
// format.
// format. Set initially to object creation time and then updated
// whenever any metadata of the object changes. This includes changes
// made by a requester, such as modifying custom metadata, as well as
// changes made by Cloud Storage on behalf of a requester, such as
// changing the storage class based on an Object Lifecycle
// Configuration.
Updated string `json:"updated,omitempty"`

// ServerResponse contains the HTTP response code and headers from the
4 vendor/google.golang.org/grpc/balancer/base/balancer.go generated vendored
@ -45,6 +45,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
scStates: make(map[balancer.SubConn]connectivity.State),
csEvltr: &balancer.ConnectivityStateEvaluator{},
config: bb.config,
state: connectivity.Connecting,
}
// Initialize picker to a picker that always returns
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
@ -134,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
b.ResolverError(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}

b.regeneratePicker()
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
return nil
}
4 vendor/google.golang.org/grpc/clientconn.go generated vendored
@ -146,6 +146,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
cc.ctx, cc.cancel = context.WithCancel(context.Background())

for _, opt := range extraDialOptions {
opt.apply(&cc.dopts)
}

for _, opt := range opts {
opt.apply(&cc.dopts)
}
54 vendor/google.golang.org/grpc/credentials/google/xds.go generated vendored
|
@ -21,6 +21,7 @@ package google
|
|||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
@ -28,12 +29,16 @@ import (
|
|||
)
|
||||
|
||||
const cfeClusterNamePrefix = "google_cfe_"
|
||||
const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_"
|
||||
const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com"
|
||||
|
||||
// clusterTransportCreds is a combo of TLS + ALTS.
|
||||
//
|
||||
// On the client, ClientHandshake picks TLS or ALTS based on address attributes.
|
||||
// - if attributes has cluster name
|
||||
// - if cluster name has prefix "google_cfe_", use TLS
|
||||
// - if cluster name has prefix "google_cfe_", or
|
||||
// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_",
|
||||
// use TLS
|
||||
// - otherwise, use ALTS
|
||||
// - else, do TLS
|
||||
//
|
||||
|
@ -50,18 +55,49 @@ func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clust
|
|||
}
|
||||
}
|
||||
|
||||
func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
|
||||
// clusterName returns the xDS cluster name stored in the attributes in the
|
||||
// context.
|
||||
func clusterName(ctx context.Context) string {
|
||||
chi := credentials.ClientHandshakeInfoFromContext(ctx)
|
||||
if chi.Attributes == nil {
|
||||
return c.tls.ClientHandshake(ctx, authority, rawConn)
|
||||
return ""
|
||||
}
|
||||
cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes)
|
||||
if !ok || strings.HasPrefix(cn, cfeClusterNamePrefix) {
|
||||
return c.tls.ClientHandshake(ctx, authority, rawConn)
|
||||
cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes)
|
||||
return cluster
|
||||
}
|
||||
|
||||
// isDirectPathCluster returns true if the cluster in the context is a
|
||||
// directpath cluster, meaning ALTS should be used.
|
||||
func isDirectPathCluster(ctx context.Context) bool {
|
||||
cluster := clusterName(ctx)
|
||||
if cluster == "" {
|
||||
// No cluster; not xDS; use TLS.
|
||||
return false
|
||||
}
|
||||
// If attributes have cluster name, and cluster name is not cfe, it's a
|
||||
// backend address, use ALTS.
|
||||
return c.alts.ClientHandshake(ctx, authority, rawConn)
|
||||
if strings.HasPrefix(cluster, cfeClusterNamePrefix) {
|
||||
// xDS cluster prefixed by "google_cfe_"; use TLS.
|
||||
return false
|
||||
}
|
||||
if !strings.HasPrefix(cluster, "xdstp:") {
|
||||
// Other xDS cluster name; use ALTS.
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(cluster)
|
||||
if err != nil {
|
||||
// Shouldn't happen, but assume ALTS.
|
||||
return true
|
||||
}
|
||||
// If authority AND path match our CFE checks, use TLS; otherwise use ALTS.
|
||||
return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix)
|
||||
}
|
||||
|
||||
func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
|
||||
if isDirectPathCluster(ctx) {
|
||||
// If attributes have cluster name, and cluster name is not cfe, it's a
|
||||
// backend address, use ALTS.
|
||||
return c.alts.ClientHandshake(ctx, authority, rawConn)
|
||||
}
|
||||
return c.tls.ClientHandshake(ctx, authority, rawConn)
|
||||
}
|
||||
|
||||
func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
|
||||
|
|
13 vendor/google.golang.org/grpc/dialoptions.go generated vendored
@ -35,6 +35,15 @@ import (
"google.golang.org/grpc/stats"
)

func init() {
internal.AddExtraDialOptions = func(opt ...DialOption) {
extraDialOptions = append(extraDialOptions, opt...)
}
internal.ClearExtraDialOptions = func() {
extraDialOptions = nil
}
}

// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
@ -70,6 +79,8 @@ type DialOption interface {
apply(*dialOptions)
}

var extraDialOptions []DialOption

// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
@ -380,7 +391,7 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
// all the RPCs and underlying network connections in this ClientConn.
func WithStatsHandler(h stats.Handler) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.StatsHandler = h
o.copts.StatsHandlers = append(o.copts.StatsHandlers, h)
})
}
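Note on the WithStatsHandler change above: the option now appends to copts.StatsHandlers instead of overwriting a single handler, so a client can attach several handlers to one ClientConn. A minimal sketch of a caller relying on that, assuming a hypothetical localhost target and no-op logging handlers that are not part of this commit:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// loggingHandler is a minimal stats.Handler that only logs RPC stats events.
type loggingHandler struct{ name string }

func (h *loggingHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}
func (h *loggingHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("[%s] %T", h.name, s)
}
func (h *loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
func (h *loggingHandler) HandleConn(_ context.Context, _ stats.ConnStats) {}

func main() {
	// Both handlers are kept: with this change WithStatsHandler appends
	// rather than replacing the previously registered handler.
	conn, err := grpc.Dial("localhost:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(&loggingHandler{name: "metrics"}),
		grpc.WithStatsHandler(&loggingHandler{name: "tracing"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}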
2 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go generated vendored
@ -193,6 +193,8 @@ func (gsb *Balancer) ExitIdle() {
ei.ExitIdle()
return
}
gsb.mu.Lock()
defer gsb.mu.Unlock()
for sc := range balToUpdate.subconns {
sc.Connect()
}
4 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go generated vendored
@ -42,14 +42,14 @@ var binLogger Logger

var grpclogLogger = grpclog.Component("binarylog")

// SetLogger sets the binarg logger.
// SetLogger sets the binary logger.
//
// Only call this at init time.
func SetLogger(l Logger) {
binLogger = l
}

// GetLogger gets the binarg logger.
// GetLogger gets the binary logger.
//
// Only call this at init time.
func GetLogger() Logger {
2 vendor/google.golang.org/grpc/internal/envconfig/xds.go generated vendored
@ -77,7 +77,7 @@ var (
// environment variable
// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
// "true".
XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true")
XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false")

// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
// which can be disabled by setting the environment variable
70 vendor/google.golang.org/grpc/internal/internal.go generated vendored
|
@ -63,6 +63,76 @@ var (
|
|||
// xDS-enabled server invokes this method on a grpc.Server when a particular
|
||||
// listener moves to "not-serving" mode.
|
||||
DrainServerTransports interface{} // func(*grpc.Server, string)
|
||||
// AddExtraServerOptions adds an array of ServerOption that will be
|
||||
// effective globally for newly created servers. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
AddExtraServerOptions interface{} // func(opt ...ServerOption)
|
||||
// ClearExtraServerOptions clears the array of extra ServerOption. This
|
||||
// method is useful in testing and benchmarking.
|
||||
ClearExtraServerOptions func()
|
||||
// AddExtraDialOptions adds an array of DialOption that will be effective
|
||||
// globally for newly created client channels. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
AddExtraDialOptions interface{} // func(opt ...DialOption)
|
||||
// ClearExtraDialOptions clears the array of extra DialOption. This
|
||||
// method is useful in testing and benchmarking.
|
||||
ClearExtraDialOptions func()
|
||||
|
||||
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
|
||||
// the provided xds bootstrap config instead of the global configuration from
|
||||
// the supported environment variables. The resolver.Builder is meant to be
|
||||
// used in conjunction with the grpc.WithResolvers DialOption.
|
||||
//
|
||||
// Testing Only
|
||||
//
|
||||
// This function should ONLY be used for testing and may not work with some
|
||||
// other features, including the CSDS service.
|
||||
NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)
|
||||
|
||||
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
|
||||
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
|
||||
// variable.
|
||||
//
|
||||
// TODO: Remove this function once the RLS env var is removed.
|
||||
RegisterRLSClusterSpecifierPluginForTesting func()
|
||||
|
||||
// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
|
||||
// Specifier Plugin for testing purposes. This is needed because there is no way
|
||||
// to unregister the RLS Cluster Specifier Plugin after registering it solely
|
||||
// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
|
||||
//
|
||||
// TODO: Remove this function once the RLS env var is removed.
|
||||
UnregisterRLSClusterSpecifierPluginForTesting func()
|
||||
|
||||
// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
|
||||
// purposes, regardless of the RBAC environment variable.
|
||||
//
|
||||
// TODO: Remove this function once the RBAC env var is removed.
|
||||
RegisterRBACHTTPFilterForTesting func()
|
||||
|
||||
// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
|
||||
// testing purposes. This is needed because there is no way to unregister the
|
||||
// HTTP Filter after registering it solely for testing purposes using
|
||||
// RegisterRBACHTTPFilterForTesting().
|
||||
//
|
||||
// TODO: Remove this function once the RBAC env var is removed.
|
||||
UnregisterRBACHTTPFilterForTesting func()
|
||||
|
||||
// RegisterOutlierDetectionBalancerForTesting registers the Outlier
|
||||
// Detection Balancer for testing purposes, regardless of the Outlier
|
||||
// Detection environment variable.
|
||||
//
|
||||
// TODO: Remove this function once the Outlier Detection env var is removed.
|
||||
RegisterOutlierDetectionBalancerForTesting func()
|
||||
|
||||
// UnregisterOutlierDetectionBalancerForTesting unregisters the Outlier
|
||||
// Detection Balancer for testing purposes. This is needed because there is
|
||||
// no way to unregister the Outlier Detection Balancer after registering it
|
||||
// solely for testing purposes using
|
||||
// RegisterOutlierDetectionBalancerForTesting().
|
||||
//
|
||||
// TODO: Remove this function once the Outlier Detection env var is removed.
|
||||
UnregisterOutlierDetectionBalancerForTesting func()
|
||||
)
|
||||
|
||||
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
||||
|
|
22 vendor/google.golang.org/grpc/internal/transport/handler_server.go generated vendored
|
@ -49,7 +49,7 @@ import (
|
|||
// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
||||
// from inside an http.Handler. It requires that the http Server
|
||||
// supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
return nil, errors.New("gRPC requires HTTP/2")
|
||||
}
|
||||
|
@ -138,7 +138,7 @@ type serverHandlerTransport struct {
|
|||
// TODO make sure this is consistent across handler_server and http2_server
|
||||
contentSubtype string
|
||||
|
||||
stats stats.Handler
|
||||
stats []stats.Handler
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close() {
|
||||
|
@ -228,10 +228,10 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
|||
})
|
||||
|
||||
if err == nil { // transport has not been closed
|
||||
if ht.stats != nil {
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
for _, sh := range ht.stats {
|
||||
sh.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
|
@ -314,10 +314,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
|||
})
|
||||
|
||||
if err == nil {
|
||||
if ht.stats != nil {
|
||||
for _, sh := range ht.stats {
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
sh.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
Header: md.Copy(),
|
||||
Compression: s.sendCompress,
|
||||
})
|
||||
|
@ -369,14 +369,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
|||
}
|
||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||
s.ctx = peer.NewContext(ctx, pr)
|
||||
if ht.stats != nil {
|
||||
s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
for _, sh := range ht.stats {
|
||||
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: ht.RemoteAddr(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
ht.stats.HandleRPC(s.ctx, inHeader)
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.trReader = &transportReader{
|
||||
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
|
||||
|
|
45 vendor/google.golang.org/grpc/internal/transport/http2_client.go generated vendored
|
@ -90,7 +90,7 @@ type http2Client struct {
|
|||
kp keepalive.ClientParameters
|
||||
keepaliveEnabled bool
|
||||
|
||||
statsHandler stats.Handler
|
||||
statsHandlers []stats.Handler
|
||||
|
||||
initialWindowSize int32
|
||||
|
||||
|
@ -311,7 +311,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|||
isSecure: isSecure,
|
||||
perRPCCreds: perRPCCreds,
|
||||
kp: kp,
|
||||
statsHandler: opts.StatsHandler,
|
||||
statsHandlers: opts.StatsHandlers,
|
||||
initialWindowSize: initialWindowSize,
|
||||
onPrefaceReceipt: onPrefaceReceipt,
|
||||
nextID: 1,
|
||||
|
@ -341,15 +341,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
for _, sh := range t.statsHandlers {
|
||||
t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
})
|
||||
connBegin := &stats.ConnBegin{
|
||||
Client: true,
|
||||
}
|
||||
t.statsHandler.HandleConn(t.ctx, connBegin)
|
||||
sh.HandleConn(t.ctx, connBegin)
|
||||
}
|
||||
t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
||||
if err != nil {
|
||||
|
@ -773,24 +773,27 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
|||
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
|
||||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
if len(t.statsHandlers) != 0 {
|
||||
header, ok := metadata.FromOutgoingContext(ctx)
|
||||
if ok {
|
||||
header.Set("user-agent", t.userAgent)
|
||||
} else {
|
||||
header = metadata.Pairs("user-agent", t.userAgent)
|
||||
}
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header,
|
||||
for _, sh := range t.statsHandlers {
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
// Note: Creating a new stats object to prevent pollution.
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header,
|
||||
}
|
||||
sh.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
@ -916,11 +919,11 @@ func (t *http2Client) Close(err error) {
|
|||
for _, s := range streams {
|
||||
t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
for _, sh := range t.statsHandlers {
|
||||
connEnd := &stats.ConnEnd{
|
||||
Client: true,
|
||||
}
|
||||
t.statsHandler.HandleConn(t.ctx, connEnd)
|
||||
sh.HandleConn(t.ctx, connEnd)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1432,7 +1435,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
|||
close(s.headerChan)
|
||||
}
|
||||
|
||||
if t.statsHandler != nil {
|
||||
for _, sh := range t.statsHandlers {
|
||||
if isHeader {
|
||||
inHeader := &stats.InHeader{
|
||||
Client: true,
|
||||
|
@ -1440,14 +1443,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
|||
Header: metadata.MD(mdata).Copy(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inHeader)
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
} else {
|
||||
inTrailer := &stats.InTrailer{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Trailer: metadata.MD(mdata).Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inTrailer)
|
||||
sh.HandleRPC(s.ctx, inTrailer)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
28 vendor/google.golang.org/grpc/internal/transport/http2_server.go generated vendored
|
@ -82,7 +82,7 @@ type http2Server struct {
|
|||
// updates, reset streams, and various settings) to the controller.
|
||||
controlBuf *controlBuffer
|
||||
fc *trInFlow
|
||||
stats stats.Handler
|
||||
stats []stats.Handler
|
||||
// Keepalive and max-age parameters for the server.
|
||||
kp keepalive.ServerParameters
|
||||
// Keepalive enforcement policy.
|
||||
|
@ -257,7 +257,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|||
fc: &trInFlow{limit: uint32(icwz)},
|
||||
state: reachable,
|
||||
activeStreams: make(map[uint32]*Stream),
|
||||
stats: config.StatsHandler,
|
||||
stats: config.StatsHandlers,
|
||||
kp: kp,
|
||||
idle: time.Now(),
|
||||
kep: kep,
|
||||
|
@ -272,13 +272,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
if t.stats != nil {
|
||||
t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
for _, sh := range t.stats {
|
||||
t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
})
|
||||
connBegin := &stats.ConnBegin{}
|
||||
t.stats.HandleConn(t.ctx, connBegin)
|
||||
sh.HandleConn(t.ctx, connBegin)
|
||||
}
|
||||
t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
if err != nil {
|
||||
|
@ -570,8 +570,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
t.adjustWindow(s, uint32(n))
|
||||
}
|
||||
s.ctx = traceCtx(s.ctx, s.method)
|
||||
if t.stats != nil {
|
||||
s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
for _, sh := range t.stats {
|
||||
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
|
@ -580,7 +580,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
WireLength: int(frame.Header().Length),
|
||||
Header: metadata.MD(mdata).Copy(),
|
||||
}
|
||||
t.stats.HandleRPC(s.ctx, inHeader)
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.ctxDone = s.ctx.Done()
|
||||
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
|
||||
|
@ -996,14 +996,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
|||
t.closeStream(s, true, http2.ErrCodeInternal, false)
|
||||
return ErrHeaderListSizeLimitViolation
|
||||
}
|
||||
if t.stats != nil {
|
||||
for _, sh := range t.stats {
|
||||
// Note: Headers are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
outHeader := &stats.OutHeader{
|
||||
Header: s.header.Copy(),
|
||||
Compression: s.sendCompress,
|
||||
}
|
||||
t.stats.HandleRPC(s.Context(), outHeader)
|
||||
sh.HandleRPC(s.Context(), outHeader)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1064,10 +1064,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
|||
// Send a RST_STREAM after the trailers if the client has not already half-closed.
|
||||
rst := s.getState() == streamActive
|
||||
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
|
||||
if t.stats != nil {
|
||||
for _, sh := range t.stats {
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
sh.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
|
@ -1222,9 +1222,9 @@ func (t *http2Server) Close() {
|
|||
for _, s := range streams {
|
||||
s.cancel()
|
||||
}
|
||||
if t.stats != nil {
|
||||
for _, sh := range t.stats {
|
||||
connEnd := &stats.ConnEnd{}
|
||||
t.stats.HandleConn(t.ctx, connEnd)
|
||||
sh.HandleConn(t.ctx, connEnd)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
5 vendor/google.golang.org/grpc/internal/transport/http_util.go generated vendored
@ -322,8 +322,6 @@ type bufWriter struct {
batchSize int
conn net.Conn
err error

onFlush func()
}

func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
@ -360,9 +358,6 @@ func (w *bufWriter) Flush() error {
if w.offset == 0 {
return nil
}
if w.onFlush != nil {
w.onFlush()
}
_, w.err = w.conn.Write(w.buf[:w.offset])
w.offset = 0
return w.err
6 vendor/google.golang.org/grpc/internal/transport/transport.go generated vendored
@ -523,7 +523,7 @@ type ServerConfig struct {
ConnectionTimeout time.Duration
Credentials credentials.TransportCredentials
InTapHandle tap.ServerInHandle
StatsHandler stats.Handler
StatsHandlers []stats.Handler
KeepaliveParams keepalive.ServerParameters
KeepalivePolicy keepalive.EnforcementPolicy
InitialWindowSize int32
@ -553,8 +553,8 @@ type ConnectOptions struct {
CredsBundle credentials.Bundle
// KeepaliveParams stores the keepalive parameters.
KeepaliveParams keepalive.ClientParameters
// StatsHandler stores the handler for stats.
StatsHandler stats.Handler
// StatsHandlers stores the handler for stats.
StatsHandlers []stats.Handler
// InitialWindowSize sets the initial window size for a stream.
InitialWindowSize int32
// InitialConnWindowSize sets the initial window size for a connection.
7 vendor/google.golang.org/grpc/regenerate.sh generated vendored
@ -68,7 +68,6 @@ SOURCES=(
${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto
${WORKDIR}/grpc-proto/grpc/testing/*.proto
${WORKDIR}/grpc-proto/grpc/core/*.proto
)
@ -80,8 +79,7 @@ SOURCES=(
# Note that the protos listed here are all for testing purposes. All protos to
# be used externally should have a go_package option (and they don't need to be
# listed here).
OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\
Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
@ -121,9 +119,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/
# see grpc_testing_not_regenerate/README.md for details.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go

# grpc/service_config/service_config.proto does not have a go_package option.
mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config

# grpc/testing does not have a go_package option.
mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/
55 vendor/google.golang.org/grpc/resolver/map.go generated vendored
|
@ -28,25 +28,40 @@ type addressMapEntry struct {
|
|||
// Multiple accesses may not be performed concurrently. Must be created via
|
||||
// NewAddressMap; do not construct directly.
|
||||
type AddressMap struct {
|
||||
m map[string]addressMapEntryList
|
||||
// The underlying map is keyed by an Address with fields that we don't care
|
||||
// about being set to their zero values. The only fields that we care about
|
||||
// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
|
||||
// distinguish between addresses with same `Addr` and `ServerName`, but
|
||||
// different `Attributes`, we cannot store the `Attributes` in the map key.
|
||||
//
|
||||
// The comparison operation for structs work as follows:
|
||||
// Struct values are comparable if all their fields are comparable. Two
|
||||
// struct values are equal if their corresponding non-blank fields are equal.
|
||||
//
|
||||
// The value type of the map contains a slice of addresses which match the key
|
||||
// in their `Addr` and `ServerName` fields and contain the corresponding value
|
||||
// associated with them.
|
||||
m map[Address]addressMapEntryList
|
||||
}
|
||||
|
||||
func toMapKey(addr *Address) Address {
|
||||
return Address{Addr: addr.Addr, ServerName: addr.ServerName}
|
||||
}
|
||||
|
||||
type addressMapEntryList []*addressMapEntry
|
||||
|
||||
// NewAddressMap creates a new AddressMap.
|
||||
func NewAddressMap() *AddressMap {
|
||||
return &AddressMap{m: make(map[string]addressMapEntryList)}
|
||||
return &AddressMap{m: make(map[Address]addressMapEntryList)}
|
||||
}
|
||||
|
||||
// find returns the index of addr in the addressMapEntry slice, or -1 if not
|
||||
// present.
|
||||
func (l addressMapEntryList) find(addr Address) int {
|
||||
if len(l) == 0 {
|
||||
return -1
|
||||
}
|
||||
for i, entry := range l {
|
||||
if entry.addr.ServerName == addr.ServerName &&
|
||||
entry.addr.Attributes.Equal(addr.Attributes) {
|
||||
// Attributes are the only thing to match on here, since `Addr` and
|
||||
// `ServerName` are already equal.
|
||||
if entry.addr.Attributes.Equal(addr.Attributes) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
@ -55,7 +70,8 @@ func (l addressMapEntryList) find(addr Address) int {
|
|||
|
||||
// Get returns the value for the address in the map, if present.
|
||||
func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
|
||||
entryList := a.m[addr.Addr]
|
||||
addrKey := toMapKey(&addr)
|
||||
entryList := a.m[addrKey]
|
||||
if entry := entryList.find(addr); entry != -1 {
|
||||
return entryList[entry].value, true
|
||||
}
|
||||
|
@ -64,17 +80,19 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
|
|||
|
||||
// Set updates or adds the value to the address in the map.
|
||||
func (a *AddressMap) Set(addr Address, value interface{}) {
|
||||
entryList := a.m[addr.Addr]
|
||||
addrKey := toMapKey(&addr)
|
||||
entryList := a.m[addrKey]
|
||||
if entry := entryList.find(addr); entry != -1 {
|
||||
a.m[addr.Addr][entry].value = value
|
||||
entryList[entry].value = value
|
||||
return
|
||||
}
|
||||
a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value})
|
||||
a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
|
||||
}
|
||||
|
||||
// Delete removes addr from the map.
|
||||
func (a *AddressMap) Delete(addr Address) {
|
||||
entryList := a.m[addr.Addr]
|
||||
addrKey := toMapKey(&addr)
|
||||
entryList := a.m[addrKey]
|
||||
entry := entryList.find(addr)
|
||||
if entry == -1 {
|
||||
return
|
||||
|
@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) {
|
|||
copy(entryList[entry:], entryList[entry+1:])
|
||||
entryList = entryList[:len(entryList)-1]
|
||||
}
|
||||
a.m[addr.Addr] = entryList
|
||||
a.m[addrKey] = entryList
|
||||
}
|
||||
|
||||
// Len returns the number of entries in the map.
|
||||
|
@ -107,3 +125,14 @@ func (a *AddressMap) Keys() []Address {
|
|||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Values returns a slice of all current map values.
|
||||
func (a *AddressMap) Values() []interface{} {
|
||||
ret := make([]interface{}, 0, a.Len())
|
||||
for _, entryList := range a.m {
|
||||
for _, entry := range entryList {
|
||||
ret = append(ret, entry.value)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
|
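The resolver/map.go hunk above rekeys AddressMap so that Addr and ServerName form the map key while Attributes are compared inside the per-key entry list, and it adds a Values helper. A small hedged sketch of how this behaves from the caller's side, assuming the grpc resolver and attributes packages at this vendored version and made-up addresses:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMap()

	// Same Addr and ServerName, different Attributes: these are two
	// distinct entries, which is exactly what the new keying preserves.
	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "a")}
	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "b")}

	m.Set(a1, "conn-a")
	m.Set(a2, "conn-b")

	fmt.Println(m.Len()) // 2
	if v, ok := m.Get(a1); ok {
		fmt.Println(v) // conn-a
	}
	fmt.Println(len(m.Values())) // 2, via the newly added Values helper
}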
54 vendor/google.golang.org/grpc/server.go generated vendored
|
@ -73,6 +73,12 @@ func init() {
|
|||
internal.DrainServerTransports = func(srv *Server, addr string) {
|
||||
srv.drainServerTransports(addr)
|
||||
}
|
||||
internal.AddExtraServerOptions = func(opt ...ServerOption) {
|
||||
extraServerOptions = opt
|
||||
}
|
||||
internal.ClearExtraServerOptions = func() {
|
||||
extraServerOptions = nil
|
||||
}
|
||||
}
|
||||
|
||||
var statusOK = status.New(codes.OK, "")
|
||||
|
@ -150,7 +156,7 @@ type serverOptions struct {
|
|||
chainUnaryInts []UnaryServerInterceptor
|
||||
chainStreamInts []StreamServerInterceptor
|
||||
inTapHandle tap.ServerInHandle
|
||||
statsHandler stats.Handler
|
||||
statsHandlers []stats.Handler
|
||||
maxConcurrentStreams uint32
|
||||
maxReceiveMessageSize int
|
||||
maxSendMessageSize int
|
||||
|
@ -174,6 +180,7 @@ var defaultServerOptions = serverOptions{
|
|||
writeBufferSize: defaultWriteBufSize,
|
||||
readBufferSize: defaultReadBufSize,
|
||||
}
|
||||
var extraServerOptions []ServerOption
|
||||
|
||||
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
|
||||
type ServerOption interface {
|
||||
|
@ -435,7 +442,7 @@ func InTapHandle(h tap.ServerInHandle) ServerOption {
|
|||
// StatsHandler returns a ServerOption that sets the stats handler for the server.
|
||||
func StatsHandler(h stats.Handler) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.statsHandler = h
|
||||
o.statsHandlers = append(o.statsHandlers, h)
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -560,6 +567,9 @@ func (s *Server) stopServerWorkers() {
|
|||
// started to accept requests yet.
|
||||
func NewServer(opt ...ServerOption) *Server {
|
||||
opts := defaultServerOptions
|
||||
for _, o := range extraServerOptions {
|
||||
o.apply(&opts)
|
||||
}
|
||||
for _, o := range opt {
|
||||
o.apply(&opts)
|
||||
}
|
||||
|
@ -867,7 +877,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
|
|||
ConnectionTimeout: s.opts.connectionTimeout,
|
||||
Credentials: s.opts.creds,
|
||||
InTapHandle: s.opts.inTapHandle,
|
||||
StatsHandler: s.opts.statsHandler,
|
||||
StatsHandlers: s.opts.statsHandlers,
|
||||
KeepaliveParams: s.opts.keepaliveParams,
|
||||
KeepalivePolicy: s.opts.keepalivePolicy,
|
||||
InitialWindowSize: s.opts.initialWindowSize,
|
||||
|
@ -963,7 +973,7 @@ var _ http.Handler = (*Server)(nil)
|
|||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
|
||||
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
|
@ -1076,8 +1086,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
|
|||
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
|
||||
}
|
||||
err = t.Write(stream, hdr, payload, opts)
|
||||
if err == nil && s.opts.statsHandler != nil {
|
||||
s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
|
||||
if err == nil {
|
||||
for _, sh := range s.opts.statsHandlers {
|
||||
sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -1124,13 +1136,13 @@ func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerIn
|
|||
}
|
||||
|
||||
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
|
||||
sh := s.opts.statsHandler
|
||||
if sh != nil || trInfo != nil || channelz.IsOn() {
|
||||
shs := s.opts.statsHandlers
|
||||
if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
|
||||
if channelz.IsOn() {
|
||||
s.incrCallsStarted()
|
||||
}
|
||||
var statsBegin *stats.Begin
|
||||
if sh != nil {
|
||||
for _, sh := range shs {
|
||||
beginTime := time.Now()
|
||||
statsBegin = &stats.Begin{
|
||||
BeginTime: beginTime,
|
||||
|
@ -1161,7 +1173,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
trInfo.tr.Finish()
|
||||
}
|
||||
|
||||
if sh != nil {
|
||||
for _, sh := range shs {
|
||||
end := &stats.End{
|
||||
BeginTime: statsBegin.BeginTime,
|
||||
EndTime: time.Now(),
|
||||
|
@ -1243,7 +1255,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
}
|
||||
|
||||
var payInfo *payloadInfo
|
||||
if sh != nil || binlog != nil {
|
||||
if len(shs) != 0 || binlog != nil {
|
||||
payInfo = &payloadInfo{}
|
||||
}
|
||||
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
|
||||
|
@ -1260,7 +1272,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
|
||||
}
|
||||
if sh != nil {
|
||||
for _, sh := range shs {
|
||||
sh.HandleRPC(stream.Context(), &stats.InPayload{
|
||||
RecvTime: time.Now(),
|
||||
Payload: v,
|
||||
|
@ -1418,16 +1430,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
if channelz.IsOn() {
|
||||
s.incrCallsStarted()
|
||||
}
|
||||
sh := s.opts.statsHandler
|
||||
shs := s.opts.statsHandlers
|
||||
var statsBegin *stats.Begin
|
||||
if sh != nil {
|
||||
if len(shs) != 0 {
|
||||
beginTime := time.Now()
|
||||
statsBegin = &stats.Begin{
|
||||
BeginTime: beginTime,
|
||||
IsClientStream: sd.ClientStreams,
|
||||
IsServerStream: sd.ServerStreams,
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), statsBegin)
|
||||
for _, sh := range shs {
|
||||
sh.HandleRPC(stream.Context(), statsBegin)
|
||||
}
|
||||
}
|
||||
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
|
||||
ss := &serverStream{
|
||||
|
@ -1439,10 +1453,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
|
||||
maxSendMessageSize: s.opts.maxSendMessageSize,
|
||||
trInfo: trInfo,
|
||||
statsHandler: sh,
|
||||
statsHandler: shs,
|
||||
}
|
||||
|
||||
if sh != nil || trInfo != nil || channelz.IsOn() {
|
||||
if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
|
||||
// See comment in processUnaryRPC on defers.
|
||||
defer func() {
|
||||
if trInfo != nil {
|
||||
|
@ -1456,7 +1470,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
ss.mu.Unlock()
|
||||
}
|
||||
|
||||
if sh != nil {
|
||||
if len(shs) != 0 {
|
||||
end := &stats.End{
|
||||
BeginTime: statsBegin.BeginTime,
|
||||
EndTime: time.Now(),
|
||||
|
@ -1464,7 +1478,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
if err != nil && err != io.EOF {
|
||||
end.Error = toRPCErr(err)
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
for _, sh := range shs {
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
}
|
||||
}
|
||||
|
||||
if channelz.IsOn() {
|
||||
|
|
64 vendor/google.golang.org/grpc/stream.go generated vendored
|
@@ -374,9 +374,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)

 	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
 	method := cs.callHdr.Method
-	sh := cs.cc.dopts.copts.StatsHandler
 	var beginTime time.Time
-	if sh != nil {
+	shs := cs.cc.dopts.copts.StatsHandlers
+	for _, sh := range shs {
 		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
 		beginTime = time.Now()
 		begin := &stats.Begin{

@@ -414,12 +414,12 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
 	}

 	return &csAttempt{
-		ctx:          ctx,
-		beginTime:    beginTime,
-		cs:           cs,
-		dc:           cs.cc.dopts.dc,
-		statsHandler: sh,
-		trInfo:       trInfo,
+		ctx:           ctx,
+		beginTime:     beginTime,
+		cs:            cs,
+		dc:            cs.cc.dopts.dc,
+		statsHandlers: shs,
+		trInfo:        trInfo,
 	}, nil
 }

@@ -536,8 +536,8 @@ type csAttempt struct {
 	// and cleared when the finish method is called.
 	trInfo *traceInfo

-	statsHandler stats.Handler
-	beginTime    time.Time
+	statsHandlers []stats.Handler
+	beginTime     time.Time

 	// set for newStream errors that may be transparently retried
 	allowTransparentRetry bool
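The client-side hunks above make the same switch for csAttempt: the single statsHandler becomes statsHandlers, populated from copts.StatsHandlers. A hedged sketch of what this enables on the dial side follows; it assumes that, with the slice in place, repeating grpc.WithStatsHandler appends rather than overwrites (as this vendored gRPC 1.48.0 appears to do), and the handler names and target address are made up for illustration.

// multistats_dial_sketch.go — illustrative only.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// loggingHandler is a minimal stats.Handler that just logs RPC lifecycle events.
type loggingHandler struct{ name string }

func (h *loggingHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}
func (h *loggingHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("%s: %T", h.name, s)
}
func (h *loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
func (h *loggingHandler) HandleConn(_ context.Context, _ stats.ConnStats) {}

func main() {
	// Both handlers should observe Begin/OutPayload/InPayload/End for every RPC
	// made on this connection once the per-attempt fan-out above is in place.
	conn, err := grpc.Dial("127.0.0.1:9095", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(&loggingHandler{name: "metrics"}),
		grpc.WithStatsHandler(&loggingHandler{name: "tracing"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}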
@@ -960,8 +960,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
 		}
 		return io.EOF
 	}
-	if a.statsHandler != nil {
-		a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
+	for _, sh := range a.statsHandlers {
+		sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
 	}
 	if channelz.IsOn() {
 		a.t.IncrMsgSent()

@@ -971,7 +971,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {

 func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
 	cs := a.cs
-	if a.statsHandler != nil && payInfo == nil {
+	if len(a.statsHandlers) != 0 && payInfo == nil {
 		payInfo = &payloadInfo{}
 	}

@@ -1008,8 +1008,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
 		}
 		a.mu.Unlock()
 	}
-	if a.statsHandler != nil {
-		a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
+	for _, sh := range a.statsHandlers {
+		sh.HandleRPC(a.ctx, &stats.InPayload{
 			Client:   true,
 			RecvTime: time.Now(),
 			Payload:  m,

@@ -1068,7 +1068,7 @@ func (a *csAttempt) finish(err error) {
 			ServerLoad: balancerload.Parse(tr),
 		})
 	}
-	if a.statsHandler != nil {
+	for _, sh := range a.statsHandlers {
 		end := &stats.End{
 			Client:    true,
 			BeginTime: a.beginTime,

@@ -1076,7 +1076,7 @@ func (a *csAttempt) finish(err error) {
 			Trailer: tr,
 			Error:   err,
 		}
-		a.statsHandler.HandleRPC(a.ctx, end)
+		sh.HandleRPC(a.ctx, end)
 	}
 	if a.trInfo != nil && a.trInfo.tr != nil {
 		if err == nil {

@@ -1445,7 +1445,7 @@ type serverStream struct {
 	maxSendMessageSize int
 	trInfo             *traceInfo

-	statsHandler stats.Handler
+	statsHandler []stats.Handler

 	binlog binarylog.MethodLogger
 	// serverHeaderBinlogged indicates whether server header has been logged. It

@@ -1555,8 +1555,10 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
 			Message: data,
 		})
 	}
-	if ss.statsHandler != nil {
-		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+	if len(ss.statsHandler) != 0 {
+		for _, sh := range ss.statsHandler {
+			sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+		}
 	}
 	return nil
 }

@@ -1590,7 +1592,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 		}
 	}()
 	var payInfo *payloadInfo
-	if ss.statsHandler != nil || ss.binlog != nil {
+	if len(ss.statsHandler) != 0 || ss.binlog != nil {
 		payInfo = &payloadInfo{}
 	}
 	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {

@@ -1605,15 +1607,17 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 		}
 		return toRPCErr(err)
 	}
-	if ss.statsHandler != nil {
-		ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
-			RecvTime: time.Now(),
-			Payload:  m,
-			// TODO truncate large payload.
-			Data:       payInfo.uncompressedBytes,
-			WireLength: payInfo.wireLength + headerLen,
-			Length:     len(payInfo.uncompressedBytes),
-		})
+	if len(ss.statsHandler) != 0 {
+		for _, sh := range ss.statsHandler {
+			sh.HandleRPC(ss.s.Context(), &stats.InPayload{
+				RecvTime: time.Now(),
+				Payload:  m,
+				// TODO truncate large payload.
+				Data:       payInfo.uncompressedBytes,
+				WireLength: payInfo.wireLength + headerLen,
+				Length:     len(payInfo.uncompressedBytes),
+			})
+		}
 	}
 	if ss.binlog != nil {
 		ss.binlog.Log(&binarylog.ClientMessage{
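The serverStream hunks above fan OutPayload and InPayload events out to each handler in the slice. As a rough server-side usage sketch (the byteCounter handler and listen address are illustrative, and registering grpc.StatsHandler twice again assumes the option appends to the slice shown in the diff):

// bytecounter_server_sketch.go — illustrative only.
package main

import (
	"context"
	"log"
	"net"
	"sync/atomic"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// byteCounter tallies message sizes from the InPayload/OutPayload events that
// the serverStream Send/Recv paths deliver to every registered handler.
type byteCounter struct {
	in, out int64
}

func (c *byteCounter) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (c *byteCounter) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
func (c *byteCounter) HandleConn(context.Context, stats.ConnStats) {}

func (c *byteCounter) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch p := s.(type) {
	case *stats.InPayload:
		atomic.AddInt64(&c.in, int64(p.Length))
	case *stats.OutPayload:
		atomic.AddInt64(&c.out, int64(p.Length))
	}
}

func main() {
	counter := &byteCounter{}
	srv := grpc.NewServer(
		grpc.StatsHandler(counter),
		grpc.StatsHandler(&byteCounter{}), // e.g. a second, independent observer
	)
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("listening on %s", lis.Addr())
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}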
2  vendor/google.golang.org/grpc/version.go  (generated, vendored)

@@ -19,4 +19,4 @@
 package grpc

 // Version is the current grpc version.
-const Version = "1.47.0"
+const Version = "1.48.0"
12  vendor/modules.txt  (vendored)

@@ -34,7 +34,7 @@ github.com/VictoriaMetrics/metricsql/binaryop
 # github.com/VividCortex/ewma v1.2.0
 ## explicit; go 1.12
 github.com/VividCortex/ewma
-# github.com/aws/aws-sdk-go v1.44.51
+# github.com/aws/aws-sdk-go v1.44.53
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn

@@ -159,7 +159,7 @@ github.com/influxdata/influxdb/pkg/escape
 # github.com/jmespath/go-jmespath v0.4.0
 ## explicit; go 1.14
 github.com/jmespath/go-jmespath
-# github.com/klauspost/compress v1.15.7
+# github.com/klauspost/compress v1.15.8
 ## explicit; go 1.16
 github.com/klauspost/compress
 github.com/klauspost/compress/flate

@@ -306,7 +306,7 @@ golang.org/x/oauth2/jwt
 # golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
 ## explicit
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d
+# golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e
 ## explicit; go 1.17
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/unix

@@ -321,7 +321,7 @@ golang.org/x/text/unicode/norm
 ## explicit; go 1.17
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
-# google.golang.org/api v0.86.0
+# google.golang.org/api v0.87.0
 ## explicit; go 1.15
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport

@@ -354,7 +354,7 @@ google.golang.org/appengine/internal/socket
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/socket
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20220711132622-b6f31b0ceb50
+# google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d
 ## explicit; go 1.15
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/iam/v1

@@ -369,7 +369,7 @@ google.golang.org/genproto/googleapis/type/decimal
 google.golang.org/genproto/googleapis/type/expr
 google.golang.org/genproto/googleapis/type/fraction
 google.golang.org/genproto/googleapis/type/month
-# google.golang.org/grpc v1.47.0
+# google.golang.org/grpc v1.48.0
 ## explicit; go 1.14
 google.golang.org/grpc
 google.golang.org/grpc/attributes