

Setting up a Kubernetes 1.21 high-availability cluster

Preface:
1) This walkthrough uses Docker as the container runtime.
   For a setup that drops Docker, see "k8s1.20搭建抛弃docker使用containerd".
2) For an offline installation, prepare the images in advance; see "k8s-1.21 安装——镜像准备".

Environment

OS: CentOS 7.9

  • Planned layout:

IP address      Cluster role  Node name   Other services
10.10.239.201   master        crust-m01   keepalived / haproxy / docker / kubeadm / kubelet / kubectl
10.10.239.202   master        crust-m02   keepalived / haproxy / docker / kubeadm / kubelet / kubectl
10.10.239.203   master        crust-m03   keepalived / haproxy / docker / kubeadm / kubelet / kubectl
10.10.239.204   node          crust-n01   docker / kubeadm / kubelet / kubectl
10.10.239.205   node          crust-n02   docker / kubeadm / kubelet / kubectl
10.10.239.206   node          crust-n03   docker / kubeadm / kubelet / kubectl
  • Image preparation

For an offline deployment, download the images in advance; see "k8s-1.21 安装——镜像准备".

  • hostname
    Set each server's hostname to its node name (a sketch follows the hosts list below).

  • hosts
    Add the following entries to /etc/hosts:

10.10.239.201  crust-m01
10.10.239.202  crust-m02
10.10.239.203  crust-m03
10.10.239.204  crust-n01
10.10.239.205  crust-n02
10.10.239.206  crust-n03
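A minimal sketch of these two steps, taking crust-m01 as the example host (change the hostname argument on each server; the heredoc appends the same entries shown above):

# hostnamectl set-hostname crust-m01
# cat >> /etc/hosts <<EOF
10.10.239.201  crust-m01
10.10.239.202  crust-m02
10.10.239.203  crust-m03
10.10.239.204  crust-n01
10.10.239.205  crust-n02
10.10.239.206  crust-n03
EOF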

1. Environment preparation

1.1 Disable unneeded services

  • Disable SELinux
# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g'  /etc/sysconfig/selinux
# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g'  /etc/selinux/config
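The sed edits above only take effect after a reboot; to switch SELinux to permissive immediately you can additionally run:

# setenforce 0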
  • Disable swap
# swapoff -a

Edit /etc/fstab and comment out the swap line so swap stays off after a reboot.
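One way to comment it out non-interactively (a sketch; double-check /etc/fstab afterwards):

# sed -ri '/\sswap\s/ s/^/#/' /etc/fstab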

  • Disable other services

Disable whichever of these exist; if a service is not installed, the command just prints an error, which is fine.

# systemctl disable auditd
# systemctl disable postfix
# systemctl disable irqbalance
# systemctl disable remote-fs
# systemctl disable tuned
# systemctl disable rhel-configure
# systemctl disable firewalld

1.2 Install dependencies

  • Switch the yum repositories
# mkdir /etc/yum.repos.d/bak && cp -rf /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak
# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
# wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
# yum clean all && yum makecache
# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
  • Install dependencies and run updates
# yum -y install vim-enhanced wget curl net-tools conntrack-tools bind-utils socat ipvsadm ipset
# yum -y update
  • Install Docker

A self-built docker 1.19 offline deployment package is available: download link.
Run the compose_install.sh script inside the package.

It also installs docker-compose, which is not needed here, but one extra binary does no harm to the environment.

1.3 Server tuning

  • Kernel parameters
# cat >>/etc/sysctl.conf <<EOF
net.ipv4.ip_forward = 1
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog =  32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024  65535
EOF

# sysctl -p
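Note: the net.bridge.bridge-nf-call-* keys only exist once the br_netfilter module is loaded, so sysctl -p may complain about them on a fresh host. A small sketch to load it now and on every boot (the modules-load.d file name is arbitrary):

# modprobe br_netfilter
# echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
# sysctl -p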
  • Raise the open-file limit

Temporary setting (current session only):

ulimit -n 65535

Permanent setting:

# cat >>/etc/security/limits.conf <<EOF
* soft memlock unlimited
* hard memlock unlimited
* soft nofile 1048576
* hard nofile 1048576
* soft nproc 1048576
* hard nproc 1048576
EOF

Global systemd limit (otherwise the permanent setting above does not take effect on CentOS 7):
edit /etc/systemd/system.conf and set

DefaultLimitNOFILE=1048576
  • Load kernel modules (for IPVS)
# cat <<EOF >/etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack_ipv4"
for kernel_module in \${ipvs_modules}; do
 /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
 if [ \$? -eq 0 ]; then
 /sbin/modprobe \${kernel_module}
 fi
done
EOF

# chmod +x /etc/sysconfig/modules/ipvs.modules
# sh /etc/sysconfig/modules/ipvs.modules
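You can confirm the modules were loaded with:

# lsmod | grep -e ip_vs -e nf_conntrack_ipv4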

2. Master high-availability scheme

Overview:
A keepalived-managed VIP (virtual IP) provides failover between the masters.
HAProxy load-balances requests across the API servers.
Note: HAProxy and keepalived are deployed on every master here; they could also run on separate machines.

2.1 Install and configure HAProxy

[Run on all three masters]

Note: HAProxy listens on port 7443 and forwards to the API server's default port 6443. That is why, later on, nodes join the cluster via VIP:7443.

  • Install
# yum -y install haproxy
  • Edit the configuration
    Edit /etc/haproxy/haproxy.cfg as follows:
global
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

defaults
    mode                    tcp
    log                     global
    retries                 3
    timeout connect         10s
    timeout client          1m
    timeout server          1m

frontend  kubernetes
        bind *:7443
        mode tcp
        default_backend kubernetes_master

backend kubernetes_master
    balance     roundrobin
    server master01 10.10.239.201:6443 check maxconn 2000
    server master02 10.10.239.202:6443 check maxconn 2000
    server master03 10.10.239.203:6443 check maxconn 2000
  • Start the service
# systemctl start haproxy
# systemctl enable haproxy
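A quick sanity check that HAProxy is listening on the frontend port:

# ss -lntp | grep 7443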

2.2 keepalived

  • Install
# yum -y install keepalived
  • Edit the configuration file
    Edit /etc/keepalived/keepalived.conf; on the backup masters adjust the values marked in the comments.
global_defs {
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   # vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER   # BACKUP on the backup masters
    interface eth0  # change to the actual interface name
    virtual_router_id 200
    priority 100   # highest on the master; lower it on the backups
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.10.239.200    # virtual IP
    }
}
  • Start the service
# systemctl start keepalived
# systemctl enable keepalived
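On the MASTER node the VIP should now be bound to the interface; a quick check (adjust the interface name to your environment):

# ip addr show eth0 | grep 10.10.239.200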

3. Kubernetes installation

3.1 Install kubeadm, kubelet and kubectl

[Run on all nodes]

# yum install -y kubelet kubeadm kubectl
# systemctl enable kubelet
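The command above installs whatever version is newest in the repository. Since this guide targets v1.21.2, you may prefer to pin the versions explicitly, for example:

# yum install -y kubelet-1.21.2 kubeadm-1.21.2 kubectl-1.21.2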

3.2 Initialize the cluster

  • Init configuration file (save it as init.yaml)
    [Run on crust-m01]
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
localAPIEndpoint:
  advertiseAddress: 10.10.239.201
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: crust-m01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
clusterName: kubernetes
kubernetesVersion: v1.21.2
certificatesDir: /etc/kubernetes/pki
controllerManager: {}
controlPlaneEndpoint: "10.10.239.200:7443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  timeoutForControlPlane: 4m0s
  certSANs:
  - "crust-m01"
  - "crust-m02"
  - "crust-m03"
  - "10.10.239.200"
  - "10.10.239.201"
  - "10.10.239.202"
  - "10.10.239.203"
  - "127.0.0.1"
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.1.0.0/16
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"

apiServer.certSANs must list every address or hostname through which the API can be reached (the masters, HAProxy, the VIP, and so on).
The service and pod subnets under networking must match whatever network plugin you install later, whether flannel or calico.
imageRepository may point at the Aliyun mirror (as above) or at your own private registry.
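Optionally, the control-plane images can be pre-pulled before initialization, which speeds up kubeadm init and surfaces registry problems early:

# kubeadm config images pull --config=init.yaml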

  • Initialize the cluster
# kubeadm init --config=init.yaml
# systemctl enable kubelet
  • Output
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities 
and service account keys on each node and then running the following as root:

  kubeadm join 10.10.239.200:7443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:b37367fb8a954bf72a8e54c2937c53cddfa09e2431dfbdbb30e0e54f2e8d6dc0 \
        --control-plane	  

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.10.239.200:7443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:b37367fb8a954bf72a8e54c2937c53cddfa09e2431dfbdbb30e0e54f2e8d6dc0


[Notes]
The output above contains the following useful information:

  • how to configure the kubectl tool
  • how to join additional master nodes
  • how to join worker nodes
  • Configure kubectl

Following the information in the output, run:

# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config
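kubectl should now reach the API server through the VIP, for example:

# kubectl get node

The master will show NotReady until the network plugin from section 3.4 is installed.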

3.3 Join the remaining masters

  • Copy the certificates to the other masters
    [Run on crust-m01]
# ssh 10.10.239.202 "mkdir -p /etc/kubernetes/pki/etcd"
# ssh 10.10.239.203 "mkdir -p /etc/kubernetes/pki/etcd"

# scp -r /etc/kubernetes/admin.conf 10.10.239.202:/etc/kubernetes/admin.conf
# scp -r /etc/kubernetes/admin.conf 10.10.239.203:/etc/kubernetes/admin.conf

# scp -r /etc/kubernetes/pki/{ca.*,sa.*,front*}  10.10.239.202:/etc/kubernetes/pki/
# scp -r /etc/kubernetes/pki/{ca.*,sa.*,front*}  10.10.239.203:/etc/kubernetes/pki/

# scp -r /etc/kubernetes/pki/etcd/ca.*  10.10.239.202:/etc/kubernetes/pki/etcd/ 
# scp -r /etc/kubernetes/pki/etcd/ca.*  10.10.239.203:/etc/kubernetes/pki/etcd/
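As an alternative to copying certificates by hand, kubeadm can distribute them itself: run the command below on crust-m01 to upload the certificates and print a certificate key, then append --certificate-key <key> to the control-plane join command (<key> is a placeholder for the printed value):

# kubeadm init phase upload-certs --upload-certs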
  • Join the other masters to the control plane
    [Run on the other two masters]

The init output already printed this command; copy it over and run it:

  # kubeadm join 10.10.239.200:7443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:b37367fb8a954bf72a8e54c2937c53cddfa09e2431dfbdbb30e0e54f2e8d6dc0 \
        --control-plane	  
  • Configure kubectl
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config

3.4 Network plugin

There are many options.
For small clusters I usually pick flannel; see "flannel网络的安装和删除".
For large clusters I pick calico; see "calico网络安装和删除".
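As a rough sketch of the flannel path (the manifest URL below is the commonly used upstream location and may have moved; its default pod CIDR of 10.244.0.0/16 matches the podSubnet set in init.yaml):

# kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml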

3.5 Join the worker nodes

[Run on all worker nodes]

The init output already printed this command; copy it over and run it:

  # kubeadm join 10.10.239.200:7443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:b37367fb8a954bf72a8e54c2937c53cddfa09e2431dfbdbb30e0e54f2e8d6dc0   
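The bootstrap token is only valid for 24 hours (the ttl in init.yaml). If it has expired by the time a node joins, generate a fresh join command on any master:

# kubeadm token create --print-join-command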

4. Check the results

  • Check the nodes

Without a network plugin the nodes show NotReady; once the plugin is installed they become Ready:

[root@crust-m03 ~]# kubectl get node
NAME        STATUS   ROLES                  AGE     VERSION
crust-m01   Ready    control-plane,master   24h     v1.21.2
crust-m02   Ready    control-plane,master   23h     v1.21.2
crust-m03   Ready    control-plane,master   23h     v1.21.2
crust-n01   Ready    <none>                 3h22m   v1.21.2
crust-n02   Ready    <none>                 22m     v1.21.2
crust-n03   Ready    <none>                 17m     v1.21.2
  • Pods in kube-system

Until the network plugin is installed, the coredns pods stay in Pending:

[root@crust-m01 ~]# kubectl get pod -n kube-system
NAME                                READY   STATUS    RESTARTS   AGE
coredns-59d64cd4d4-8h9b7            1/1     Running   0          15h
coredns-59d64cd4d4-mpfln            1/1     Running   0          15h
etcd-crust-m01                      1/1     Running   2          24h
etcd-crust-m02                      1/1     Running   0          24h
etcd-crust-m03                      1/1     Running   0          24h
kube-apiserver-crust-m01            1/1     Running   2          24h
kube-apiserver-crust-m02            1/1     Running   0          24h
kube-apiserver-crust-m03            1/1     Running   0          24h
kube-controller-manager-crust-m01   1/1     Running   2          24h
kube-controller-manager-crust-m02   1/1     Running   0          24h
kube-controller-manager-crust-m03   1/1     Running   0          24h
kube-flannel-ds-amd64-8fw85         1/1     Running   0          31m
kube-flannel-ds-amd64-b6xts         1/1     Running   0          15h
kube-flannel-ds-amd64-gl542         1/1     Running   0          27m
kube-flannel-ds-amd64-krdzq         1/1     Running   0          3h31m
kube-flannel-ds-amd64-lqv9p         1/1     Running   0          15h
kube-flannel-ds-amd64-wndqk         1/1     Running   0          15h
kube-proxy-7rqmj                    1/1     Running   0          20m
kube-proxy-8s68r                    1/1     Running   0          3h31m
kube-proxy-hqvbs                    1/1     Running   0          31m
kube-proxy-srjq2                    1/1     Running   0          24h
kube-proxy-t6mvq                    1/1     Running   1          24h
kube-proxy-wbs9h                    1/1     Running   0          24h
kube-scheduler-crust-m01            1/1     Running   2          24h
kube-scheduler-crust-m02            1/1     Running   0          24h
kube-scheduler-crust-m03            1/1     Running   0          24h


5. Test

Start an nginx deployment as a quick test (the image below is from my private registry; replace it with your own).

  • Create the nginx.yml file
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: harbocto.xxx.com.cn/public/nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30200
  selector:
    app: nginx
  • Create the nginx resources
[root@calico-master ~]# kubectl apply -f nginx.yml
deployment.apps/nginx created
service/nginx created
  • Check the result
[root@calico-master ~]# kubectl get pod
NAME                     READY   STATUS    RESTARTS   AGE
nginx-6764986cbd-ztdjf   1/1     Running   0          5s
[root@calico-master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.1.0.1       <none>        443/TCP        2d2h
nginx        NodePort    10.1.157.245   <none>        80:30200/TCP   12s
  • Check from a browser

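The nginx welcome page should be reachable on port 30200 of any node, for example:

# curl http://10.10.239.204:30200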

6. Installing a StorageClass


