当前位置:   article > 正文

go微服务部署k8s 或者docker设置正确核心数_服务启动时核心数

服务启动时核心数

k8s 或docker的部署的时候,go会默认启动和cpu相同的核心数来运作服务
但是go的runtime默认用的是宿主机的核心数,会生成多于当前pod配额的线程,线程间频繁的上下文切换,造成性能损耗,而且宿主机核心数越大、pod核心数越少,问题越严重

总结
必须使用类似automaxprocs的方法按容器配额设置核心数,否则runtime默认的GOMAXPROCS可能会和容器限制的核心数不符
/proc/self下是当前进程的信息
/proc/self/cgroup 当前进程的cgroup信息
如下

11:freezer:/
10:memory:/
9:pids:/
8:blkio:/
7:hugetlb:/
6:devices:/
5:net_prio,net_cls:/
4:cpuset:/
3:cpuacct,cpu:/
2:perf_event:/
1:name=systemd:/user.slice/user-1000.slice/session-25788.scope
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11

/proc/self/mountinfo 当前进程的挂载信息
如下

26 25 0:22 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
27 18 0:23 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
28 25 0:24 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,perf_event
29 25 0:25 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuacct,cpu
30 25 0:26 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuset
31 25 0:27 / /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,net_prio,net_cls
32 25 0:28 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices
33 25 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,hugetlb
34 25 0:30 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,blkio
35 25 0:31 / /sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,pids
36 25 0:32 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,memory
37 25 0:33 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,freezer
把/proc/self/cgroup中的子系统路径翻译成/proc/self/mountinfo中对应的挂载点
比如cpu就是/sys/fs/cgroup/cpu

这边需要读取/sys/fs/cgroup/cpu/cpu.cfs_period_us和/sys/fs/cgroup/cpu/cpu.cfs_quota_us

而cfs_quota_us/cfs_period_us就是核数
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18

首先看到使用方式

// Blank import for side effect only: the package's init() calls
// maxprocs.Set, aligning GOMAXPROCS with the container's CPU quota.
import _ "go.uber.org/automaxprocs"

func main() {
  // Your application logic here.
}
  • 1
  • 2
  • 3
  • 4
  • 5

automaxprocs.go 中

    maxprocs.Set(maxprocs.Logger(log.Printf))
}
  • 1
  • 2

maxprocs/maxprocs.go中

// Set adjusts GOMAXPROCS from the container's CPU quota and returns an
// undo function plus an error. (Excerpt — "..." marks code elided from
// the original library source.)
func Set(opts ...Option) (func(), error) {
    cfg := &config{
        // procs converts the cgroup CPU quota
        // (cfs_quota_us / cfs_period_us) into a GOMAXPROCS value.
        procs:         iruntime.CPUQuotaToGOMAXPROCS,
        // Never set GOMAXPROCS below one processor.
        minGOMAXPROCS: 1,
    }
    ...
    // status reports whether a quota was found, left as-is, or applied.
    maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS)
    if err != nil {
        return undoNoop, err
    }
    ...
}
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12

internal/cgroups/cgroups.go中

// Paths to the current process's cgroup (v1) metadata under procfs.
const (
    _procPathCGroup    = "/proc/self/cgroup"    // subsystem -> cgroup path mapping
    _procPathMountInfo = "/proc/self/mountinfo" // mount table, locates cgroup mount points
)

// NewCGroupsForCurrentProcess builds the CGroups map for the running
// process by reading its mountinfo and cgroup files under /proc/self.
func NewCGroupsForCurrentProcess() (CGroups, error) {
    mountInfoPath, cgroupPath := _procPathMountInfo, _procPathCGroup
    return NewCGroups(mountInfoPath, cgroupPath)
}

// NewCGroups parses the given cgroup and mountinfo files and returns a
// CGroups map keyed by subsystem name. (Excerpt — "..." marks code
// elided from the original library source.)
//
// Fix vs. the excerpt as quoted: the error returned by parseMountInfo
// was silently dropped and the function never returned its result;
// both are handled now, matching the upstream library.
func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) {
    cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup)
    if err != nil {
        return nil, err
    }

    cgroups := make(CGroups)
    // newMountPoint is invoked for every mount-table entry; it keeps only
    // cgroup filesystem mounts and records each subsystem's cgroup path.
    newMountPoint := func(mp *MountPoint) error {
        if mp.FSType != _cgroupFSType {
            return nil // not a cgroup mount; skip
        }
        ...
        cgroupPath, err := mp.Translate(subsys.Name)
        ...
    }

    if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
        return nil, err
    }
    return cgroups, nil
}

// CPUQuota returns the CPU limit of the cgroup as a fraction of cores
// (cfs_quota_us / cfs_period_us). The boolean reports whether a quota
// is defined; (-1, false, nil) means no limit is configured.
func (cg CGroups) CPUQuota() (float64, bool, error) {
    cpuCGroup, ok := cg[_cgroupSubsysCPU]
    if !ok {
        // No cpu subsystem entry for this process.
        return -1, false, nil
    }

    quota, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam)
    defined := quota > 0
    if err != nil || !defined {
        // quota <= 0 (typically -1) means unlimited.
        return -1, defined, err
    }

    period, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam)
    if err != nil {
        return -1, false, err
    }

    return float64(quota) / float64(period), true, nil
}
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46

internal/cgroups/mountpoint.go中

// NewMountPointFromLine parses a single /proc/$PID/mountinfo line into a
// *MountPoint. (Excerpt — body elided in the original article.)
func NewMountPointFromLine(line string) (*MountPoint, error) {
    ...
}

// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`)
// and yields parsed *MountPoint into newMountPoint.
// (Excerpt — "..." marks code elided from the original library source.)
func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error {
    ...
    // NOTE(review): the errors from NewMountPointFromLine and newMountPoint
    // appear ignored in this excerpt — verify the full library checks both.
    mountPoint, err := NewMountPointFromLine(scanner.Text())
    newMountPoint(mountPoint)
    ...
}

// Translate maps an absolute cgroup path (as seen in /proc/self/cgroup)
// onto the corresponding location under this mount point.
// (Excerpt — "..." marks code elided from the original library source.)
func (mp *MountPoint) Translate(absPath string) (string, error) {
    ...
    // Join the mount point with the path relative to the mount root.
    return filepath.Join(mp.MountPoint, relPath), nil
}
// NewCGroup returns a new *CGroup from a given path.
func NewCGroup(path string) *CGroup {
    c := CGroup{path: path}
    return &c
}

// readInt parses the first line from a cgroup param file as int.
func (cg *CGroup) readInt(param string) (int, error) {
    line, err := cg.readFirstLine(param)
    if err == nil {
        return strconv.Atoi(line)
    }
    return 0, err
}
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/不正经/article/detail/304918
推荐阅读
相关标签
  

闽ICP备14008679号