app: respect CPU limits set via cgroups
Update GOMAXPROCS to match the CPU limit set via cgroups. This should reduce CPU thrashing and memory usage when VictoriaMetrics components run in containers with CPU limits. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/685
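For example, assuming Docker with cgroup v1, docker run --cpus=1.5 sets cpu.cfs_quota_us to 150000 and cpu.cfs_period_us to 100000 in the container's cpu controller. The new lib/cgroup package reads both files, computes the quota 150000/100000 = 1.5, and sets GOMAXPROCS to 2 by rounding to the nearest integer.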
parent 59d95961b8
commit 6721e47ae9

12 changed files with 102 additions and 59 deletions
app/vmagent/main.go

@@ -18,6 +18,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/vmimport"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	graphiteserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/graphite"
@@ -59,6 +60,7 @@ func main() {
 	envflag.Parse()
 	buildinfo.Init()
 	logger.Init()
+	cgroup.UpdateGOMAXPROCSToCPUQuota()
 
 	if *dryRun {
 		if err := flag.Set("promscrape.config.strictParse", "true"); err != nil {
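The same two-line change repeats in each entrypoint below: import lib/cgroup and call cgroup.UpdateGOMAXPROCSToCPUQuota() right after logger.Init(), since the function logs its decision and therefore requires an initialized logger.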
app/vmalert/main.go

@@ -15,6 +15,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/remoteread"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/remotewrite"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
@@ -54,6 +55,7 @@ func main() {
 	envflag.Parse()
 	buildinfo.Init()
 	logger.Init()
+	cgroup.UpdateGOMAXPROCSToCPUQuota()
 
 	ctx, cancel := context.WithCancel(context.Background())
 	manager, err := newManager(ctx)
app/vmauth/main.go

@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
@@ -27,6 +28,7 @@ func main() {
 	envflag.Parse()
 	buildinfo.Init()
 	logger.Init()
+	cgroup.UpdateGOMAXPROCSToCPUQuota()
 	logger.Infof("starting vmauth at %q...", *httpListenAddr)
 	startTime := time.Now()
 	initAuthConfig()
app/vmbackup/main.go

@@ -11,6 +11,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fslocal"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 )
@@ -36,6 +37,8 @@ func main() {
 	flag.Usage = usage
 	envflag.Parse()
 	buildinfo.Init()
 	logger.Init()
+	cgroup.UpdateGOMAXPROCSToCPUQuota()
+
 	if len(*snapshotCreateURL) > 0 {
 		logger.Infof("%s", "Snapshots enabled")
app/vminsert/main.go

@@ -21,6 +21,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/vmimport"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
@@ -61,6 +62,7 @@ func main() {
 	envflag.Parse()
 	buildinfo.Init()
 	logger.Init()
+	cgroup.UpdateGOMAXPROCSToCPUQuota()
 
 	logger.Infof("initializing netstorage for storageNodes %s...", *storageNodes)
 	startTime := time.Now()
app/vmrestore/main.go

@@ -9,6 +9,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fslocal"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 )
@@ -30,6 +31,8 @@ func main() {
 	flag.Usage = usage
 	envflag.Parse()
 	buildinfo.Init()
 	logger.Init()
+	cgroup.UpdateGOMAXPROCSToCPUQuota()
+
 	srcFS, err := newSrcFS()
 	if err != nil {
app/vmselect/main.go

@@ -15,6 +15,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
@@ -59,6 +60,7 @@ func main() {
 	envflag.Parse()
 	buildinfo.Init()
 	logger.Init()
+	cgroup.UpdateGOMAXPROCSToCPUQuota()
 
 	logger.Infof("starting netstorage at storageNodes %s", *storageNodes)
 	startTime := time.Now()
app/vmstorage/main.go

@@ -11,6 +11,7 @@ import (
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/transport"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
@@ -41,6 +42,7 @@ func main() {
 	envflag.Parse()
 	buildinfo.Init()
 	logger.Init()
+	cgroup.UpdateGOMAXPROCSToCPUQuota()
 
 	storage.SetMinScrapeIntervalForDeduplication(*minScrapeInterval)
 	storage.SetBigMergeWorkersCount(*bigMergeConcurrency)
lib/cgroup/cpu.go (new file)

@@ -0,0 +1,40 @@
+package cgroup
+
+import (
+	"os"
+	"runtime"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+)
+
+// UpdateGOMAXPROCSToCPUQuota updates GOMAXPROCS to cgroup CPU quota if GOMAXPROCS isn't set in environment var.
+//
+// This function must be called after logger.Init().
+func UpdateGOMAXPROCSToCPUQuota() {
+	if v := os.Getenv("GOMAXPROCS"); v != "" {
+		return
+	}
+	q := getCPUQuota()
+	if q <= 0 {
+		// Do not change GOMAXPROCS
+		return
+	}
+	gomaxprocs := int(q + 0.5)
+	if gomaxprocs <= 0 {
+		gomaxprocs = 1
+	}
+	logger.Infof("updating GOMAXPROCS to %d according to cgroup CPU quota", gomaxprocs)
+	runtime.GOMAXPROCS(gomaxprocs)
+}
+
+func getCPUQuota() float64 {
+	quotaUS, err := readInt64("/sys/fs/cgroup/cpu/cpu.cfs_quota_us", "cat /sys/fs/cgroup/cpu$(cat /proc/self/cgroup | grep cpu, | cut -d: -f3)/cpu.cfs_quota_us")
+	if err != nil {
+		return 0
+	}
+	periodUS, err := readInt64("/sys/fs/cgroup/cpu/cpu.cfs_period_us", "cat /sys/fs/cgroup/cpu$(cat /proc/self/cgroup | grep cpu, | cut -d: -f3)/cpu.cfs_period_us")
+	if err != nil {
+		return 0
+	}
+	return float64(quotaUS) / float64(periodUS)
+}
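The quota is rounded to the nearest whole CPU, with a floor of one worker. A minimal standalone sketch (not part of the commit) of how that rounding behaves for typical container limits:

package main

import "fmt"

func main() {
	// Same rounding as in UpdateGOMAXPROCSToCPUQuota above.
	for _, q := range []float64{0.3, 0.5, 1.0, 1.5, 2.0} {
		gomaxprocs := int(q + 0.5)
		if gomaxprocs <= 0 {
			gomaxprocs = 1
		}
		fmt.Printf("quota %.1f -> GOMAXPROCS %d\n", q, gomaxprocs)
	}
}

This prints 1 for quotas 0.3, 0.5 and 1.0, and 2 for quotas 1.5 and 2.0.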
lib/cgroup/mem.go (new file)

@@ -0,0 +1,16 @@
+package cgroup
+
+// GetMemoryLimit returns cgroup memory limit
+func GetMemoryLimit() int64 {
+	// Try determining the amount of memory inside docker container.
+	// See https://stackoverflow.com/questions/42187085/check-mem-limit-within-a-docker-container
+	//
+	// Read memory limit according to https://unix.stackexchange.com/questions/242718/how-to-find-out-how-much-memory-lxc-container-is-allowed-to-consume
+	// This should properly determine the limit inside lxc container.
+	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/84
+	n, err := readInt64("/sys/fs/cgroup/memory/memory.limit_in_bytes", "cat /sys/fs/cgroup/memory$(cat /proc/self/cgroup | grep memory | cut -d: -f3)/memory.limit_in_bytes")
+	if err != nil {
+		return 0
+	}
+	return n
+}
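A minimal caller sketch (hypothetical, not part of the commit) showing the zero-on-error contract of the new helper:

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
)

func main() {
	// GetMemoryLimit returns 0 when the limit cannot be read,
	// e.g. when running outside a container.
	if limit := cgroup.GetMemoryLimit(); limit > 0 {
		fmt.Printf("cgroup memory limit: %d bytes\n", limit)
	} else {
		fmt.Println("no cgroup memory limit detected")
	}
}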
lib/cgroup/util.go (new file)

@@ -0,0 +1,24 @@
+package cgroup
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os/exec"
+	"strconv"
+)
+
+func readInt64(path, altCommand string) (int64, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		// Read data according to https://unix.stackexchange.com/questions/242718/how-to-find-out-how-much-memory-lxc-container-is-allowed-to-consume
+		// This should properly determine the data location inside lxc container.
+		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/84
+		cmd := exec.Command("/bin/sh", "-c", altCommand)
+		data, err = cmd.Output()
+		if err != nil {
+			return 0, err
+		}
+	}
+	data = bytes.TrimSpace(data)
+	return strconv.ParseInt(string(data), 10, 64)
+}
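The fallback shell command resolves the caller's own cgroup path when the conventional mount point cannot be read. For example, if /proc/self/cgroup contains a line such as 4:memory:/lxc/foo (illustrative), cut -d: -f3 extracts /lxc/foo, so the command reads /sys/fs/cgroup/memory/lxc/foo/memory.limit_in_bytes, which is where LXC exposes the limit.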
lib/memory/memory_linux.go

@@ -1,11 +1,9 @@
 package memory
 
 import (
-	"io/ioutil"
-	"os/exec"
-	"strconv"
 	"syscall"
 
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 )
 
@@ -20,62 +18,9 @@ func sysTotalMemory() int {
 	if uint64(maxInt)/uint64(si.Totalram) > uint64(si.Unit) {
 		totalMem = int(uint64(si.Totalram) * uint64(si.Unit))
 	}
-
-	// Try determining the amount of memory inside docker container.
-	// See https://stackoverflow.com/questions/42187085/check-mem-limit-within-a-docker-container .
-	data, err := ioutil.ReadFile("/sys/fs/cgroup/memory/memory.limit_in_bytes")
-	if err != nil {
-		// Try determining the amount of memory inside lxc container.
-		mem, err := readLXCMemoryLimit(totalMem)
-		if err != nil {
-			return totalMem
-		}
-		return mem
-	}
-	mem, err := readPositiveInt(data, totalMem)
-	if err != nil {
-		return totalMem
-	}
-	if mem != totalMem {
-		return mem
-	}
-
-	// Try reading LXC memory limit, since it looks like the cgroup limit doesn't work
-	mem, err = readLXCMemoryLimit(totalMem)
-	if err != nil {
-		return totalMem
-	}
-	return mem
-}
-
-func readLXCMemoryLimit(totalMem int) (int, error) {
-	// Read memory limit according to https://unix.stackexchange.com/questions/242718/how-to-find-out-how-much-memory-lxc-container-is-allowed-to-consume
-	// This should properly determine the limit inside lxc container.
-	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/84
-	cmd := exec.Command("/bin/sh", "-c",
-		`cat /sys/fs/cgroup/memory$(cat /proc/self/cgroup | grep memory | cut -d: -f3)/memory.limit_in_bytes`)
-	data, err := cmd.Output()
-	if err != nil {
-		return 0, err
-	}
-	return readPositiveInt(data, totalMem)
-}
-
-func readPositiveInt(data []byte, maxN int) (int, error) {
-	for len(data) > 0 && data[len(data)-1] == '\n' {
-		data = data[:len(data)-1]
-	}
-	n, err := strconv.ParseUint(string(data), 10, 64)
-	if err != nil {
-		return 0, err
-	}
-	if int64(n) < 0 || int64(int(n)) != int64(n) {
-		// Int overflow.
-		return maxN, nil
-	}
-	ni := int(n)
-	if ni > maxN {
-		return maxN, nil
-	}
-	return ni, nil
+	mem := cgroup.GetMemoryLimit()
+	if mem <= 0 || int64(int(mem)) != mem || int(mem) > totalMem {
+		return totalMem
+	}
+	return int(mem)
 }
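With lib/cgroup in place, sysTotalMemory() no longer probes Docker and LXC itself: it simply takes the smaller of the RAM reported by sysinfo and the cgroup memory limit. The int64(int(mem)) != mem check guards against truncation when the 64-bit limit does not fit into int, for example a limit above 2GiB on a 32-bit platform; in that case the system total is used instead.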