
Deploying Kubernetes with Ansible + Shell + Containerd

Contents

I. Four VMs with working internet access

II. Deploy Ansible on the main host for centralized management

1. Install Ansible

2. Inventory and configuration file

3. Distribute SSH keys for management

III. Base preparation of the nodes for Kubernetes

1. Complete the hosts file, stop firewalld, disable SELinux

2. Time sync, disable swap, configure kernel parameters and IPVS

IV. Deploy Kubernetes

1. Files used here

2. The playbook

V. Environment setup on the main host and cluster initialization (done last)

1. Files used here

2. The script

3. Join the nodes to the cluster with ad-hoc Ansible commands

VI. Deploy the Calico network plugin


This guide deploys Kubernetes with Ansible, shell scripts, and containerd. The scripts and playbooks in each part are written separately so they are easier to follow; merge and adapt them if you prefer to run them together. The code is fairly long, so take care to change the host names, IP addresses, and other parameters to match your own environment.

I. Four VMs with working internet access

hostname                                                      IP
main (the Ansible control host and Kubernetes control plane)  192.168.2.130
servera                                                       192.168.2.131
serverb                                                       192.168.2.132
serverc                                                       192.168.2.133
[root@main ~]# tail -4 /etc/hosts
192.168.2.130 main
192.168.2.131 servera
192.168.2.132 serverb
192.168.2.133 serverc
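
Before moving on, it can help to confirm that main resolves all four names to the IPs in the table above. This quick check is my own addition, not part of the original steps:

[root@main ~]# for h in main servera serverb serverc; do getent hosts "$h"; done   # each line should print the expected IP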

II. Deploy Ansible on the main host for centralized management

1. Install Ansible

[root@main ~]# yum install -y epel-release
[root@main ~]# yum install -y ansible
[root@main ~]# ansible --version
ansible 2.9.27
  config file = /root/ansible.cfg
  configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/lib/python2.7/site-packages/ansible
  executable location = /usr/bin/ansible
  python version = 2.7.5 (default, Nov 14 2023, 16:14:06) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]

2. Inventory and configuration file

[root@main ~]# cat myhosts
[node]
servera
serverb
serverc
[root@main ~]# cat ansible.cfg
[defaults]
inventory=/root/myhosts
remote_user=root
become=True
host_key_checking=False
ask_pass=False
gathering=smart
[privilege_escalation]
become=True
become_method=sudo
become_user=root
become_ask_pass=False
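
Because ansible.cfg is only picked up from the current working directory (here /root), a quick sanity check that this configuration and inventory are actually in effect can help. This verification step is my own addition:

[root@main ~]# ansible --version | grep 'config file'   # should point at /root/ansible.cfg
[root@main ~]# ansible-inventory --graph                 # should show the node group with servera, serverb, serverc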

3. Distribute SSH keys for management

[root@main ~]# cat node-key.sh
#!/bin/bash
hosts=("192.168.2.131" "192.168.2.132" "192.168.2.133")
for host in "${hosts[@]}"
do
    ssh-copy-id root@$host
done
[root@main k8s]# ansible all -m ping
serverb | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python"
    },
    "changed": false,
    "ping": "pong"
}
serverc | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python"
    },
    "changed": false,
    "ping": "pong"
}
servera | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python"
    },
    "changed": false,
    "ping": "pong"
}
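
Note that ssh-copy-id prompts for each node's password and assumes a key pair already exists on main. The sketch below is a hedged alternative of my own: it generates the key if needed and distributes it non-interactively, assuming sshpass is installed and that all three nodes share the same root password (neither assumption comes from the original article).

#!/bin/bash
# Hypothetical non-interactive variant of node-key.sh
NODE_PASS='your-root-password'            # assumption: identical root password on every node
hosts=("192.168.2.131" "192.168.2.132" "192.168.2.133")
# generate an RSA key pair only if one does not already exist
[ -f /root/.ssh/id_rsa ] || ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa
for host in "${hosts[@]}"; do
    # push the public key without interactive password and host-key prompts
    sshpass -p "$NODE_PASS" ssh-copy-id -o StrictHostKeyChecking=no root@"$host"
done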

III. Base preparation of the nodes for Kubernetes

1. Complete the hosts file, stop firewalld, disable SELinux

# The playbook file:
[root@main k8s]# cat host-firewall-selinux.yaml
---
- name: hosts-config          # copy the hosts file to the managed nodes and append it (add the main node's entry yourself)
  hosts: node
  tasks:
    - name: copy hostfile
      copy:
        src: /root/k8s/host-config
        dest: /root
    - name: add hostfile
      shell: cat /root/host-config >> /etc/hosts
- name: stop firewalld        # stop the firewall
  hosts: node
  tasks:
    - name: stop it
      service:
        name: firewalld
        state: stopped
        enabled: no
- name: change selinux        # set SELinux to disabled and reboot
  hosts: node
  tasks:
    - name: change it
      lineinfile:
        path: /etc/selinux/config
        regexp: '^SELINUX='
        line: SELINUX=disabled
    - name: restart hosts
      reboot:
[root@main k8s]# ansible-playbook host-firewall-selinux.yaml
PLAY [hosts-config] **************************************************************************************************************
TASK [Gathering Facts] ***********************************************************************************************************
ok: [serverb]
ok: [serverc]
ok: [servera]
TASK [copy hostfile] *************************************************************************************************************
ok: [serverb]
ok: [serverc]
ok: [servera]
TASK [add hostfile] **************************************************************************************************************
changed: [serverb]
changed: [serverc]
changed: [servera]
PLAY [stop firewalld] ************************************************************************************************************
TASK [stop it] *******************************************************************************************************************
ok: [servera]
ok: [serverc]
ok: [serverb]
PLAY [change selinux] ************************************************************************************************************
TASK [change it] *****************************************************************************************************************
ok: [servera]
ok: [serverb]
ok: [serverc]
TASK [restart hosts] *************************************************************************************************************
changed: [serverb]
changed: [serverc]
changed: [servera]
PLAY RECAP ***********************************************************************************************************************
servera                   : ok=6   changed=2   unreachable=0   failed=0   skipped=0   rescued=0   ignored=0
serverb                   : ok=6   changed=2   unreachable=0   failed=0   skipped=0   rescued=0   ignored=0
serverc                   : ok=6   changed=2   unreachable=0   failed=0   skipped=0   rescued=0   ignored=0
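
After the nodes come back from the reboot, an ad-hoc spot check (my own addition, not in the original article) can confirm the intended state; the `|| true` keeps the call from failing on firewalld's non-zero exit code when it is inactive:

[root@main k8s]# ansible node -m shell -a "getenforce; systemctl is-active firewalld || true; tail -4 /etc/hosts"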

2. Time sync, disable swap, configure kernel parameters and IPVS

[root@main k8s]# cat sysctl   # kernel parameters file
vm.swappiness=0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@main k8s]# cat ipvs   # IPVS modules file
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
[root@main k8s]# cat basic.yaml   # the playbook
---
- name: install soft
  hosts: node
  vars:
    package:
      - wget
      - tree
      - bash-completion
      - lrzsz
      - psmisc
      - net-tools
      - vim
      - chrony
      - ipset
      - ipvsadm
  tasks:
    - yum:
        name: "{{ package }}"
        state: latest
- name: config chronyd
  hosts: node
  tasks:
    - service:
        name: chronyd
        state: started
    - shell: sed -i -e '/^server/s/^/# /' -e '$ a\server ntp1.aliyun.com iburst' /etc/chrony.conf
    - service:
        name: chronyd
        state: restarted
    - shell: chronyc sources
- name: swapoff
  hosts: node
  tasks:
    - shell: swapoff -a && sed -i 's/.*swap.*/#&/' /etc/fstab
- name: sysctl
  hosts: node
  tasks:
    - copy:
        src: /root/k8s/sysctl
        dest: /root
    - shell: cat /root/sysctl > /etc/sysctl.conf && modprobe br_netfilter && modprobe overlay && sysctl -p
- name: ipvs
  hosts: node
  tasks:
    - copy:
        src: /root/k8s/ipvs
        dest: /root
    - shell: cat /root/ipvs > /etc/sysconfig/modules/ipvs.modules && chmod +x /etc/sysconfig/modules/ipvs.modules && /bin/bash /etc/sysconfig/modules/ipvs.modules
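
Running basic.yaml produces no output worth showing here, so a quick ad-hoc verification (my own addition) of time sync, swap, kernel parameters, and the IPVS modules can be useful:

[root@main k8s]# ansible node -m shell -a "chronyc sources | head -3; free -m | grep -i swap; sysctl net.ipv4.ip_forward; lsmod | grep -E 'ip_vs|br_netfilter' | head -3"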

IV. Deploy Kubernetes

1. Files used here

[root@main k8s]# cat k8s-image
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
[root@main k8s]# cat crictl
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false

2. The playbook

[root@main k8s]# cat nodeconfig.yaml
---
- name: install k8s
  hosts: node
  vars:
    package:
      - kubeadm
      - kubelet
      - kubectl
  tasks:
    - copy:
        src: /root/k8s/k8s-image
        dest: /etc/yum.repos.d/kubernetes.repo
    - yum:
        name: "{{ package }}"
        state: latest
    - shell: |
        echo 'KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"' > /etc/sysconfig/kubelet
        echo 'KUBE_PROXY_MODE="ipvs"' >> /etc/sysconfig/kubelet
    - service:
        name: kubelet
        state: started
        enabled: yes
- name: install containerd
  hosts: node
  vars:
    package:
      - yum-utils
      - device-mapper-persistent-data
      - lvm2
  tasks:
    - yum:
        name: "{{ package }}"
        state: latest
    - shell: yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    - shell: sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
    - yum:
        name: containerd
        state: latest
    - shell: containerd config default | tee /etc/containerd/config.toml
    - shell: sed -i "s#SystemdCgroup\ \=\ false#SystemdCgroup\ \=\ true#g" /etc/containerd/config.toml
    - shell: sed -i "s#registry.k8s.io#registry.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
- name: pull image
  hosts: node
  tasks:
    - copy:
        src: /root/k8s/crictl
        dest: /root
    - shell: cat /root/crictl > /etc/crictl.yaml
    - shell: systemctl daemon-reload
    - service:
        name: containerd
        state: started
        enabled: yes
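
nodeconfig.yaml installs kubeadm, kubelet, and kubectl with state: latest, while the control plane below is initialized with kubernetesVersion: 1.28.2. A safer variant (my own suggestion, with 1.28.2 taken from the kubeadm.yml edits in the next section) is to pin the node packages to the same version and then verify the tooling:

[root@main k8s]# ansible node -m shell -a "yum install -y kubeadm-1.28.2 kubelet-1.28.2 kubectl-1.28.2"
[root@main k8s]# ansible node -m shell -a "kubeadm version -o short; containerd --version; crictl info > /dev/null && echo CRI OK"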

V. Environment setup on the main host and cluster initialization (done last)

This part is done with a plain, straightforward shell script. It is long, so download it and carefully adapt the parts you need before running it.

1. Files used here

[root@main k8s]# cat host-config
192.168.2.130 main
192.168.2.131 servera
192.168.2.132 serverb
192.168.2.133 serverc
[root@main k8s]# cat sysctl
vm.swappiness=0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@main k8s]# cat ipvs
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
[root@main k8s]# cat k8s-image
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
[root@main k8s]# cat crictl
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false

2. The script

[root@main k8s]# cat main.sh
#!/bin/bash
cat /root/k8s/host-config >> /etc/hosts && tail -4 /etc/hosts
echo   # hosts configuration
systemctl stop firewalld && systemctl disable firewalld && systemctl status firewalld | grep Active
echo   # firewall
sed -i '/^SELINUX=/ c SELINUX=disabled' /etc/selinux/config && setenforce 0
yum install -y wget tree bash-completion lrzsz psmisc net-tools vim chrony ipset ipvsadm
swapoff -a && sed -i 's/.*swap.*/#&/' /etc/fstab && free -m
echo   # SELinux, swap, and package installation
cat /root/k8s/sysctl > /etc/sysctl.conf && modprobe br_netfilter && modprobe overlay && sysctl -p
echo   # kernel parameters
cat /root/k8s/ipvs > /etc/sysconfig/modules/ipvs.modules && chmod +x /etc/sysconfig/modules/ipvs.modules && /bin/bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
echo   # IPVS forwarding
cat /root/k8s/k8s-image > /etc/yum.repos.d/kubernetes.repo && yum install -y kubeadm kubelet kubectl && kubeadm version
echo   # install the Kubernetes packages
echo 'KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"' > /etc/sysconfig/kubelet
echo 'KUBE_PROXY_MODE="ipvs"' >> /etc/sysconfig/kubelet
systemctl start kubelet && systemctl enable kubelet
echo   # kubelet cgroup driver
yum install -y yum-utils device-mapper-persistent-data lvm2 && yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo && sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum install -y containerd && containerd config default | tee /etc/containerd/config.toml && sed -i "s#SystemdCgroup\ \=\ false#SystemdCgroup\ \=\ true#g" /etc/containerd/config.toml && sed -i "s#registry.k8s.io#registry.aliyuncs.com/google_containers#g" /etc/containerd/config.toml && crictl --version
echo   # install containerd
cat /root/k8s/crictl > /etc/crictl.yaml && systemctl daemon-reload && systemctl start containerd && systemctl enable containerd && crictl pull nginx && crictl images
echo
kubeadm config print init-defaults > /root/k8s/kubeadm.yml   # carefully adapt these values to your environment
sed -i 's/advertiseAddress:.*/advertiseAddress: 192.168.2.130/g' /root/k8s/kubeadm.yml
sed -i 's/name:.*/name: main/g' /root/k8s/kubeadm.yml
sed -i 's/imageRepository:.*/imageRepository: registry.aliyuncs.com\/google_containers/g' /root/k8s/kubeadm.yml
sed -i 's/kubernetesVersion:.*/kubernetesVersion: 1.28.2/g' /root/k8s/kubeadm.yml
systemctl restart containerd
kubeadm config images pull --config /root/k8s/kubeadm.yml
crictl images
echo
kubeadm init --config=/root/k8s/kubeadm.yml --upload-certs --v=6 && export KUBECONFIG=/etc/kubernetes/admin.conf   # the export works for root; a regular user should run the three commands below instead
# mkdir -p $HOME/.kube
# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# sudo chown $(id -u):$(id -g) $HOME/.kube/config
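
Note that the export KUBECONFIG at the end of main.sh only affects the shell in which the script runs; a new login shell will not see it. A small follow-up of my own to make it persistent for root:

[root@main k8s]# echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> /root/.bashrc
[root@main k8s]# source /root/.bashrc && kubectl get nodes   # kubectl now works in new shells as well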

3. Join the nodes to the cluster with ad-hoc Ansible commands

......     # output after the script finishes
kubeadm join 192.168.2.130:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4100be7411051d31e4a953d2450ad2a7b6802df204373f539ca4602d35cb38b8
[root@main k8s]# ansible node -m shell -a "kubeadm join 192.168.2.130:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:4100be7411051d31e4a953d2450ad2a7b6802df204373f539ca4602d35cb38b8"
[root@main k8s]# kubectl get nodes
NAME      STATUS     ROLES           AGE   VERSION
main      NotReady   control-plane   71s   v1.28.2
servera   NotReady   <none>          22s   v1.28.2
serverb   NotReady   <none>          22s   v1.28.2
serverc   NotReady   <none>          22s   v1.28.2
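
The token printed by kubeadm init expires after 24 hours. If the nodes are joined later, or the init output was lost, a fresh join command can be generated on main and pushed to all nodes in one step (an extra tip, not part of the original flow):

[root@main k8s]# JOIN_CMD=$(kubeadm token create --print-join-command)
[root@main k8s]# ansible node -m shell -a "$JOIN_CMD"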

VI. Deploy the Calico network plugin

[root@main k8s]# cat calico.sh
#!/bin/bash
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion && source <(kubectl completion bash) && echo "source <(kubectl completion bash)" >> ~/.bashrc
wget --no-check-certificate https://projectcalico.docs.tigera.io/archive/v3.25/manifests/calico.yaml
sed -i '/value: "k8s,bgp"/a \           - name: IP_AUTODETECTION_METHOD\n             value: "interface=ens33"' calico.yaml   # adjust ens33 to your nodes' NIC name
kubectl apply -f calico.yaml
[root@main k8s]# kubectl get pods -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-658d97c59c-b6rwh   1/1     Running   0          9m31s
kube-system   calico-node-czlml                          1/1     Running   0          9m31s
kube-system   calico-node-jh7bn                          1/1     Running   0          9m31s
kube-system   calico-node-kq966                          1/1     Running   0          9m31s
kube-system   calico-node-twjct                          1/1     Running   0          9m31s
kube-system   coredns-66f779496c-27vss                   1/1     Running   0          78m
kube-system   coredns-66f779496c-fn7fc                   1/1     Running   0          78m
kube-system   etcd-main                                  1/1     Running   2          78m
kube-system   kube-apiserver-main                        1/1     Running   2          78m
kube-system   kube-controller-manager-main               1/1     Running   2          78m
kube-system   kube-proxy-lfg2b                           1/1     Running   0          77m
kube-system   kube-proxy-rzmgs                           1/1     Running   0          77m
kube-system   kube-proxy-s2nzk                           1/1     Running   0          78m
kube-system   kube-proxy-tp5dn                           1/1     Running   0          77m
kube-system   kube-scheduler-main                        1/1     Running   2          78m
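
Once every calico-node pod is Running, the nodes should report Ready. A final smoke test (my own addition) to confirm the cluster actually schedules workloads:

[root@main k8s]# kubectl get nodes -o wide        # all four nodes should now show Ready
[root@main k8s]# kubectl create deployment web --image=nginx --replicas=2
[root@main k8s]# kubectl get pods -o wide         # the pods should land on the worker nodes and reach Running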
