
6. CI/CD (Kubernetes Edition)

Lab environment

Hostname     IP               Notes
kube-20      192.168.188.20   master
kube-21      192.168.188.21   node
kube-22      192.168.188.22   node
gitlab-30    192.168.188.30   gitlab
Jenkins-40   192.168.188.40   Jenkins, git, kubectl, docker

I. Installing Docker

Environment preparation

[root@Jenkins-40 ~]# yum install -y wget
[root@Jenkins-40 ~]# cd /etc/yum.repos.d/
[root@Jenkins-40 yum.repos.d]# mkdir bak
[root@Jenkins-40 yum.repos.d]# mv ./* bak/
# Prepare the Base repo
[root@Jenkins-40 yum.repos.d]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

[root@Jenkins-40 yum.repos.d]# yum makecache

Installation

The Aliyun docker-ce mirror page also documents these steps.

# Step 1: install the required system tools
[root@Jenkins-40 yum.repos.d]# yum install -y yum-utils device-mapper-persistent-data lvm2

# Step 2: add the Docker repo
[root@Jenkins-40 yum.repos.d]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Step 3: point the repo at the Aliyun mirror
[root@Jenkins-40 yum.repos.d]# sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo

# Step 4: refresh the cache and install Docker CE
[root@Jenkins-40 yum.repos.d]# yum makecache fast
[root@Jenkins-40 yum.repos.d]# yum -y install docker-ce

# Step 5: start and enable the Docker service
[root@Jenkins-40 yum.repos.d]# service docker start
[root@Jenkins-40 yum.repos.d]# systemctl enable docker

Registry mirror

This uses Aliyun's registry mirror; see https://cr.console.aliyun.com/cn-hangzhou/instances/mirrors

mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://niphmo8u.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload
systemctl restart docker
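
A quick way to confirm the mirror took effect (output layout varies slightly across Docker versions):

docker info | grep -A1 "Registry Mirrors"   # should list the aliyuncs mirror configured above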

II. Kubernetes Deployment

Disable SELinux and the firewall

Disabling the firewall keeps day-to-day lab work simple; in a production environment it should stay enabled.

# SELinux
# permanently disable (takes effect after reboot)
sed -i 's#enforcing#disabled#g' /etc/sysconfig/selinux
# disable for the current session
setenforce 0

# Firewall
systemctl disable firewalld
systemctl stop firewalld
systemctl status firewalld

Disable the swap partition

Once swap kicks in, system performance drops sharply, so Kubernetes normally requires the swap partition to be disabled.

swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet
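
A quick sanity check that swap really is off (free should report 0 swap and swapon -s should print nothing):

free -h
swapon -s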

Configure domestic (CN mirror) yum repos

cd /etc/yum.repos.d/
mkdir bak
mv ./* bak/
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum makecache

Upgrade the kernel

Docker depends on relatively new kernel features such as ipvs, so a 4.x or newer kernel is generally required.

### Import the signing key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org


### Install ELRepo
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm


### Load the elrepo-kernel metadata
yum --disablerepo=\* --enablerepo=elrepo-kernel repolist   # 37 packages


### List the available kernel packages
yum --disablerepo=\* --enablerepo=elrepo-kernel list kernel*


### Install the long-term-support kernel
yum --disablerepo=\* --enablerepo=elrepo-kernel install -y kernel-lt.x86_64


### Remove the old kernel tools
yum remove kernel-tools-libs.x86_64 kernel-tools.x86_64 -y


### Install the new kernel tools
yum --disablerepo=\* --enablerepo=elrepo-kernel install -y kernel-lt-tools.x86_64


### Check the default boot order
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg  
CentOS Linux (4.4.183-1.el7.elrepo.x86_64) 7 (Core)  
CentOS Linux (3.10.0-327.10.1.el7.x86_64) 7 (Core)  
CentOS Linux (0-rescue-c52097a1078c403da03b8eddeac5080b) 7 (Core)
# Boot entries are indexed from 0; the new kernel is inserted at the head of the list (index 0, the old 3.10 kernel at index 1), so select 0.
grub2-set-default 0  
# Reboot and verify
reboot
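
After the reboot, confirm the new kernel is the one running:

uname -r   # should match the kernel-lt version installed above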

Ubuntu 16.04 (alternative)
# Open http://kernel.ubuntu.com/~kernel-ppa/mainline/ and pick the version you need (4.16.3 as an example).
# Then download the .deb files matching your architecture:
Build for amd64 succeeded (see BUILD.LOG.amd64):
  linux-headers-4.16.3-041603_4.16.3-041603.201804190730_all.deb
  linux-headers-4.16.3-041603-generic_4.16.3-041603.201804190730_amd64.deb
  linux-image-4.16.3-041603-generic_4.16.3-041603.201804190730_amd64.deb
# Install them and reboot
sudo dpkg -i *.deb

Install ipvs

ipvs is a kernel module with very high network-forwarding performance, so it is usually the first choice for kube-proxy.

# Install IPVS
yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp

# Load the IPVS modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do 
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF

# Verify
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

Kernel parameter tuning

These kernel parameters are tuned to better suit a host running Kubernetes.

cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.netfilter.nf_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF


# Apply immediately
sysctl --system
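
One caveat: the net.bridge.* keys only exist once the br_netfilter module is loaded, so if sysctl --system warns about them, load the module and make it persistent first (a minimal sketch):

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl --system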

Install Docker

Docker serves here as the container runtime that Kubernetes will manage.

# Step 1: install the required system tools
yum install -y yum-utils device-mapper-persistent-data lvm2

# Step 2: add the Docker repo
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Step 3: point the repo at the Aliyun mirror
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo

# Step 4: refresh the cache and install Docker CE
yum makecache fast
yum -y install docker-ce

# Step 5: start and enable the Docker service
service docker start
systemctl enable docker
# Step 6: registry mirror; also set the cgroup driver to systemd (note here rather than inline, since JSON allows no comments)
mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
  "exec-opts": ["native.cgroupdriver=systemd"],   # 添加这配置
  "registry-mirrors": ["https://niphmo8u.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload
systemctl restart docker

Synchronize cluster time

master

[root@kube-20 ~]# vim /etc/chrony.conf
[root@kube-20 ~]# grep -Ev "#|^$" /etc/chrony.conf
server 3.centos.pool.ntp.org iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.0.0/16
logdir /var/log/chrony

node

vim /etc/chrony.conf
grep -Ev "#|^$" /etc/chrony.conf
server 192.168.188.20 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony

all

systemctl restart chronyd
# Verify
date
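
date only shows the clocks roughly agree; chrony reports the real sync state (run on any node; '^*' marks the currently selected server):

chronyc sources -v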

Hostname mapping

master

[root@kube-20 ~]# vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.188.20 kube-20
192.168.188.21 kube-21
192.168.188.22 kube-22

[root@kube-20 ~]# scp -p /etc/hosts 192.168.188.21:/etc/hosts
[root@kube-20 ~]# scp -p /etc/hosts 192.168.188.22:/etc/hosts

Configure the Kubernetes repo

This uses the Aliyun mirror; the upstream instructions are at https://developer.aliyun.com/mirror/kubernetes?spm=a2c6h.13651102.0.0.3e221b11KGjWvc

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

#setenforce 0

#yum install -y kubelet kubeadm kubectl

#systemctl enable kubelet && systemctl start kubelet

# Note
# Because upstream does not expose a sync mechanism, the GPG index check may fail; in that case install with yum install -y --nogpgcheck kubelet kubeadm kubectl


# Install version 1.22.3
yum makecache --nogpgcheck

yum install -y kubelet-1.22.3 kubeadm-1.22.3 kubectl-1.22.3

systemctl enable kubelet.service

Pull the images

Because the k8s.gcr.io images cannot be pulled directly from inside China, they were rebuilt via Aliyun Code plus the Aliyun Container Registry build service and are pulled from there instead.

master

# Print the list of images kubeadm will use. A config file can be supplied to customize the images or the registry.
[root@kube-20 ~]# kubeadm config images list
I0526 12:52:43.766362    3813 version.go:255] remote version is much newer: v1.24.1; falling back to: stable-1.22
k8s.gcr.io/kube-apiserver:v1.22.10
k8s.gcr.io/kube-controller-manager:v1.22.10
k8s.gcr.io/kube-scheduler:v1.22.10
k8s.gcr.io/kube-proxy:v1.22.10
k8s.gcr.io/pause:3.5
k8s.gcr.io/etcd:3.5.0-0
k8s.gcr.io/coredns/coredns:v1.8.4

all

# Pull the pre-built images
docker pull registry.cn-shenzhen.aliyuncs.com/uplooking/kube-apiserver:v1.22.10
docker pull registry.cn-shenzhen.aliyuncs.com/uplooking/kube-controller-manager:v1.22.10
docker pull registry.cn-shenzhen.aliyuncs.com/uplooking/kube-scheduler:v1.22.10
docker pull registry.cn-shenzhen.aliyuncs.com/uplooking/kube-proxy:v1.22.10
docker pull registry.cn-shenzhen.aliyuncs.com/uplooking/pause:3.5
docker pull registry.cn-shenzhen.aliyuncs.com/uplooking/etcd:3.5.0-0
docker pull registry.cn-shenzhen.aliyuncs.com/uplooking/coredns:v1.8.4

# Re-tag them back to the names kubeadm expects
docker tag registry.cn-shenzhen.aliyuncs.com/uplooking/kube-apiserver:v1.22.10 k8s.gcr.io/kube-apiserver:v1.22.10
docker tag registry.cn-shenzhen.aliyuncs.com/uplooking/kube-controller-manager:v1.22.10 k8s.gcr.io/kube-controller-manager:v1.22.10
docker tag registry.cn-shenzhen.aliyuncs.com/uplooking/kube-scheduler:v1.22.10 k8s.gcr.io/kube-scheduler:v1.22.10
docker tag registry.cn-shenzhen.aliyuncs.com/uplooking/kube-proxy:v1.22.10 k8s.gcr.io/kube-proxy:v1.22.10
docker tag registry.cn-shenzhen.aliyuncs.com/uplooking/pause:3.5 k8s.gcr.io/pause:3.5
docker tag registry.cn-shenzhen.aliyuncs.com/uplooking/etcd:3.5.0-0 k8s.gcr.io/etcd:3.5.0-0
docker tag registry.cn-shenzhen.aliyuncs.com/uplooking/coredns:v1.8.4 k8s.gcr.io/coredns/coredns:v1.8.4
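
The same pull-and-retag pattern can be scripted so no image is missed; a minimal sketch with the mirror and tags used above (coredns is handled separately because kubeadm expects it under a sub-path):

MIRROR=registry.cn-shenzhen.aliyuncs.com/uplooking
for img in kube-apiserver:v1.22.10 kube-controller-manager:v1.22.10 kube-scheduler:v1.22.10 kube-proxy:v1.22.10 pause:3.5 etcd:3.5.0-0; do
  docker pull ${MIRROR}/${img}
  docker tag ${MIRROR}/${img} k8s.gcr.io/${img}
done
docker pull ${MIRROR}/coredns:v1.8.4
docker tag ${MIRROR}/coredns:v1.8.4 k8s.gcr.io/coredns/coredns:v1.8.4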

Initialize the nodes

master

[root@kube-20 ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.188.20
# When init completes it prints two follow-up steps: the commands below to set up kubectl on the master, and a join token valid for 24h that nodes must use within that window
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


# get nodes reports each node's status, role and version
# kubectl get no is shorthand for kubectl get nodes
[root@kube-20 ~]# kubectl get no
NAME     STATUS     ROLES                  AGE    VERSION
kube-20   NotReady   control-plane,master   6m9s   v1.22.3

node

kubeadm join 192.168.188.20:6443 --token cp36la.obg1332jj7wl11az \
	--discovery-token-ca-cert-hash sha256:ee5053647a18fc69b59b648c7e3f7a8f039d5553531d627793242d193879e0ba
	
	
	
# If the token has expired, regenerate it with:
# (prints a fresh join command)
kubeadm token create --print-join-command

master

[root@kube-20 ~]# kubectl get no
NAME     STATUS     ROLES                  AGE     VERSION
kube-20   NotReady   control-plane,master   6m55s   v1.22.3
kube-21   NotReady   <none>                 18s     v1.22.3
kube-22   NotReady   <none>                 8s      v1.22.3


# Every get command accepts --namespace or -n to target a namespace. This is especially useful for the Pods in kube-system, the services Kubernetes itself needs to run.

[root@kube-20 ~]# kubectl get po -n kube-system   # some pods stay pending because no network plugin is installed yet

Install a network plugin

Kubernetes relies on a third-party network plugin for its networking, so installing one is a prerequisite. There are several to choose from; flannel, calico, and canal (flannel + calico) are the common ones. Each provides basic networking and assigns IPs to Pods on every node.

Kubernetes defines the network model but delegates its implementation to plugins; the core job of a CNI plugin is cross-host Pod communication. Common CNI plugins: 1. Flannel 2. Calico 3. Canal 4. Contiv 5. OpenContrail 6. NSX-T 7. Kube-router

flannel is used here; save the manifest from https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml and upload it to the server.

master

[root@kube-20 ~]# ls
anaconda-ks.cfg  flannel.yml

# Some commands take configuration files, and apply adjusts cluster resources from such a file. It can also read from stdin (STDIN), but using a file makes it explicit which configuration is being applied.
# apply accepts almost any configuration, so be sure you know what the manifest does, or the results may be unexpected.

[root@kube-20 ~]# kubectl apply -f flannel.yml

all

# If the images cannot be pulled, fetch them from the self-built Aliyun registry instead (the earlier images were obtained the same way)
# Pull
docker pull registry.cn-shenzhen.aliyuncs.com/uplooking/mirrored-flannelcni-flannel:v0.17.0
docker pull registry.cn-shenzhen.aliyuncs.com/uplooking/mirrored-flannelcni-flannel-cni-plugin:v1.0.1

# Re-tag
docker tag registry.cn-shenzhen.aliyuncs.com/uplooking/mirrored-flannelcni-flannel:v0.17.0 rancher/mirrored-flannelcni-flannel:v0.17.0
docker tag registry.cn-shenzhen.aliyuncs.com/uplooking/mirrored-flannelcni-flannel-cni-plugin:v1.0.1 rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1

master

[root@kube-20 ~]# kubectl get no   # check status; all nodes Ready means the basic cluster is up and healthy
NAME     STATUS   ROLES                  AGE   VERSION
kube-20   Ready    control-plane,master   47m   v1.22.3
kube-21   Ready    <none>                 41m   v1.22.3
kube-22   Ready    <none>                 40m   v1.22.3

Enable ipvs in kube-proxy

kube-proxy runs in iptables mode by default; here it is switched to ipvs.

What if a resource needs to be changed after it has been created? That is what the kubectl edit command is for.

It can edit any resource in the cluster, opening the default text editor.

master

# Edit the kube-proxy ConfigMap
[root@kube-20 ~]# kubectl edit configmap kube-proxy -n kube-system
Locate this part of the config:
   minSyncPeriod: 0s
      scheduler: ""
      syncPeriod: 30s
    kind: KubeProxyConfiguration
    metricsBindAddress: 127.0.0.1:10249
    mode: "ipvs"                          # add this
    nodePortAddresses: null
mode is empty by default, which means iptables; change it to ipvs.
scheduler is also empty by default, which means round-robin load balancing.
When done, save and exit.
Then delete all the kube-proxy pods so they restart with the new config:
kubectl delete pod xxx -n kube-system

# kubectl delete po `kubectl get po -n kube-system | grep proxy | awk '{print $1}'` -n kube-system

Finally check a kube-proxy pod's log:
kubectl logs kube-proxy-xxx   -n kube-system
A line containing "Using ipvs Proxier" confirms the switch.

Or check with ipvsadm -l
# Delete the kube-proxy pods so they are recreated
# this deletes the named kube-proxy pods in the kube-system namespace
# kubectl delete ns xxxx   would delete an entire namespace


[root@kube-20 ~]# kubectl get po -n kube-system
NAME                              READY   STATUS    RESTARTS   AGE
coredns-78fcd69978-d8cv5          1/1     Running   0          6m43s
coredns-78fcd69978-qp7f6          1/1     Running   0          6m43s
etcd-kube-20                      1/1     Running   0          6m57s
kube-apiserver-kube-20            1/1     Running   0          6m59s
kube-controller-manager-kube-20   1/1     Running   0          6m58s
kube-flannel-ds-88kmk             1/1     Running   0          2m58s
kube-flannel-ds-wfvst             1/1     Running   0          2m58s
kube-flannel-ds-wq2vz             1/1     Running   0          2m58s
kube-proxy-4fpm9                  1/1     Running   0          6m28s
kube-proxy-hhb5s                  1/1     Running   0          6m25s
kube-proxy-jr5kl                  1/1     Running   0          6m43s
kube-scheduler-kube-20            1/1     Running   0          6m57s

[root@kube-20 ~]# kubectl delete pod kube-proxy-4fpm9 -n kube-system
pod "kube-proxy-4fpm9" deleted
[root@kube-20 ~]# kubectl delete pod kube-proxy-hhb5s -n kube-system
pod "kube-proxy-hhb5s" deleted
[root@kube-20 ~]# kubectl delete pod kube-proxy-jr5kl -n kube-system
pod "kube-proxy-jr5kl" deleted



# Check cluster state
[root@kube-20 ~]# kubectl get po -n kube-system   # the kube-proxy pods have been recreated

# Check ipvs
[root@kube-20 ~]# ipvsadm -l

Ingress NGINX

This is the official manifest with the images mirrored to Aliyun and the image references updated accordingly; adapt it from https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.2.0/deploy/static/provider/baremetal/deploy.yaml as needed.

ingress-nginx

apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - secrets
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resourceNames:
  - ingress-controller-leader
  resources:
  - configmaps
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - pods
  - secrets
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-admission
rules:
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - validatingwebhookconfigurations
  verbs:
  - get
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-admission
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: v1
data:
  allow-snippet-annotations: "true"
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  ports:
  - appProtocol: http
    name: http
    port: 80
    protocol: TCP
    targetPort: http
  - appProtocol: https
    name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: NodePort
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  ports:
  - appProtocol: https
    name: https-webhook
    port: 443
    targetPort: webhook
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  minReadySeconds: 0
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/component: controller
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/name: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/component: controller
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
    spec:
      containers:
      - args:
        - /nginx-ingress-controller
        - --election-id=ingress-controller-leader
        - --controller-class=k8s.io/ingress-nginx
        - --ingress-class=nginx
        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
        - --validating-webhook=:8443
        - --validating-webhook-certificate=/usr/local/certificates/cert
        - --validating-webhook-key=/usr/local/certificates/key
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LD_PRELOAD
          value: /usr/local/lib/libmimalloc.so
        image: registry.cn-shenzhen.aliyuncs.com/uplooking/ingress-nginx-controller:v1.2.0
        imagePullPolicy: IfNotPresent
        lifecycle:
          preStop:
            exec:
              command:
              - /wait-shutdown
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: controller
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
          protocol: TCP
        - containerPort: 8443
          name: webhook
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 100m
            memory: 90Mi
        securityContext:
          allowPrivilegeEscalation: true
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          runAsUser: 101
        volumeMounts:
        - mountPath: /usr/local/certificates/
          name: webhook-cert
          readOnly: true
      dnsPolicy: ClusterFirst
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
      - name: webhook-cert
        secret:
          secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.2.0
      name: ingress-nginx-admission-create
    spec:
      containers:
      - args:
        - create
        - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
        - --namespace=$(POD_NAMESPACE)
        - --secret-name=ingress-nginx-admission
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.cn-shenzhen.aliyuncs.com/uplooking/ingress-nginx-kube-webhook-certgen:v1.1.1
        imagePullPolicy: IfNotPresent
        name: create
        securityContext:
          allowPrivilegeEscalation: false
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
      serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.2.0
      name: ingress-nginx-admission-patch
    spec:
      containers:
      - args:
        - patch
        - --webhook-name=ingress-nginx-admission
        - --namespace=$(POD_NAMESPACE)
        - --patch-mutating=false
        - --secret-name=ingress-nginx-admission
        - --patch-failure-policy=Fail
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.cn-shenzhen.aliyuncs.com/uplooking/ingress-nginx-kube-webhook-certgen:v1.1.1
        imagePullPolicy: IfNotPresent
        name: patch
        securityContext:
          allowPrivilegeEscalation: false
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
      serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.0
  name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
  - v1
  clientConfig:
    service:
      name: ingress-nginx-controller-admission
      namespace: ingress-nginx
      path: /networking/v1/ingresses
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: validate.nginx.ingress.kubernetes.io
  rules:
  - apiGroups:
    - networking.k8s.io
    apiVersions:
    - v1
    operations:
    - CREATE
    - UPDATE
    resources:
    - ingresses
  sideEffects: None

master

[root@kube-20 ~]# kubectl apply -f ingress-new.yml

[root@kube-20 ~]# kubectl get po -n ingress-nginx
NAME                                        READY   STATUS      RESTARTS        AGE
ingress-nginx-admission-create-bpsmv        0/1     Completed   0               4h58m
ingress-nginx-admission-patch-jn6tb         0/1     Completed   0               4h58m
ingress-nginx-controller-6b548d5677-2n585   1/1     Running     1 (4h55m ago)   4h58m

[root@kube-20 ~]# kubectl describe po ingress-nginx-controller-6b548d5677-2n585 -n ingress-nginx


[root@kube-20 ~]# kubectl get po -n ingress-nginx   # the Completed admission jobs only create the webhook certificate; nothing to worry about
NAME                                        READY   STATUS      RESTARTS        AGE
ingress-nginx-admission-create-bpsmv        0/1     Completed   0               4h58m
ingress-nginx-admission-patch-jn6tb         0/1     Completed   0               4h58m
ingress-nginx-controller-6b548d5677-2n585   1/1     Running     1 (4h56m ago)   4h58m


[root@kube-20 ~]# kubectl get svc -n ingress-nginx
NAME                                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             NodePort    10.108.62.203   <none>        80:32092/TCP,443:32462/TCP   4h59m
ingress-nginx-controller-admission   ClusterIP   10.98.203.170   <none>        443/TCP                      4h59m
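
A quick smoke test of the controller: hit the HTTP NodePort (32092 here; yours will differ) on any node. Until an Ingress rule exists, nginx answering with a 404 is the expected healthy response:

[root@kube-20 ~]# curl -i http://192.168.188.20:32092   # expect HTTP/1.1 404 Not Found served by nginx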

III. GitLab Installation

1. Download to /opt/

Download gitlab-ce 13.3.8 from the Tsinghua mirror: https://mirrors.tuna.tsinghua.edu.cn/gitlab-ce/yum/el7/gitlab-ce-13.3.8-ce.0.el7.x86_64.rpm

[root@gitlab-30 ~]# cd /opt/

2. Install dependencies

[root@gitlab-30 opt]# yum install -y curl policycoreutils-python openssh-server perl

3. Install

[root@gitlab-30 opt]# rpm -ivh gitlab-ce-13.3.8-ce.0.el7.x86_64.rpm
or
[root@gitlab-30 opt]# yum -y install gitlab-ce-13.3.8-ce.0.el7.x86_64.rpm
# On success GitLab prints a fox logo along with instructions for what to do next

4. Edit the configuration

[root@gitlab-30 opt]# cp /etc/gitlab/gitlab.rb /etc/gitlab/gitlab.rb.bak
[root@gitlab-30 opt]# vim /etc/gitlab/gitlab.rb
# Make the following change
line 32, from: external_url 'http://gitlab.example.com'
to:            external_url 'http://192.168.188.30'   # this host's own address

# Apply the new configuration
[root@gitlab-30 opt]# sudo gitlab-ctl reconfigure
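
After the reconfigure, confirm every GitLab component came up:

[root@gitlab-30 opt]# gitlab-ctl status   # each service should show a 'run:' line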

IV. Jenkins Installation

Known issue: the Kubernetes plugin is not supported on Jenkins 2.289, hence the 2.332 LTS below.

Download and install

A JDK is required (installed from an rpm below). Download the Jenkins rpm from the Aliyun mirror, choosing the LTS (stable) line, version 2.332.

The rpm bundles its own web server, so no separate install is needed; the war package from the official site does require one.

https://mirrors.aliyun.com/jenkins/redhat/

[root@Jenkins-40 ~]# cd /opt/
[root@Jenkins-40 opt]# yum -y install wget
[root@Jenkins-40 opt]# wget https://mirrors.aliyun.com/jenkins/redhat-stable/jenkins-2.332.3-1.1.noarch.rpm
[root@Jenkins-40 opt]# yum -y install jenkins-2.332.3-1.1.noarch.rpm

# jdk
# 1.8
[root@Jenkins-40 opt]# yum -y install jdk-8u151-linux-x64.rpm
[root@Jenkins-40 opt]# vim /etc/profile
# java
export JAVA_HOME=/usr/java/jdk1.8.0_151
export CLASSPATH=:.$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$JAVA_HOME/bin:$PATH
[root@Jenkins-40 opt]# source !$
source /etc/profile
[root@Jenkins-40 opt]# java -version
java version "1.8.0_151"
Java(TM) SE Runtime Environment (build 1.8.0_151-b12)
Java HotSpot(TM) 64-Bit Server VM (build 25.151-b12, mixed mode)

Start

[root@Jenkins-40 opt]# systemctl start jenkins.service
[root@Jenkins-40 opt]# systemctl status jenkins.service
[root@Jenkins-40 opt]# systemctl enable jenkins.service
[root@Jenkins-40 opt]# netstat -lntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      861/sshd            
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1071/master         
tcp6       0      0 :::8080                 :::*                    LISTEN      10789/java          
tcp6       0      0 :::22                   :::*                    LISTEN      861/sshd            
tcp6       0      0 ::1:25                  :::*                    LISTEN      1071/master

Configure

# Plugin directory
[root@Jenkins-40 opt]# cd /var/lib/jenkins/plugins/   # if you unpack previously saved plugins here, mind the file ownership
[root@Jenkins-40 plugins]# ls

[root@Jenkins-40 updates]# ll /var/lib/jenkins/

# Open http://<server IP>:8080 in a browser and follow the setup prompts

[root@Jenkins-40 plugins]# cat /var/lib/jenkins/secrets/initialAdminPassword   # keep this secret in production
ed91869da4284d079286c50b48233743

Switch the update source

[root@Jenkins-40 plugins]# cd /var/lib/jenkins/updates
[root@Jenkins-40 updates]# ls
default.json  hudson.tasks.Maven.MavenInstaller
[root@Jenkins-40 updates]# cp default.json default.json.bak
# Point plugin downloads at the Tsinghua mirror and the connectivity check at Baidu
[root@Jenkins-40 updates]# sed -i 's#https://updates.jenkins.io/download#https://mirrors.tuna.tsinghua.edu.cn/jenkins#g' default.json && sed -i 's#http://www.google.com#https://www.baidu.com#g' default.json

# Verify the change
[root@Jenkins-40 updates]# cat default.json | grep "tuna"

# Plugin Manager -> Advanced -> Update Site
# set the URL to https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json

[root@Jenkins-40 updates]# systemctl restart jenkins.service
[root@Jenkins-40 updates]# systemctl status jenkins.service
# Log back in to Jenkins with the user created earlier

Integrating Jenkins with Kubernetes

1. Install the Jenkins plugin

Install the Kubernetes plugin: log in to Jenkins, go to Manage Jenkins -> Plugin Manager, search for the kubernetes plugin, and install it.

2. Connect to the cluster and request credentials

Because the Jenkins server lives outside the Kubernetes cluster, the following files are needed to reach the cluster from outside.

These steps are done in the Jenkins web UI.

Manage Jenkins ==> Configuration System ==> Cloud ==> a separate configuration page

Reference: "jenkins 配置连接k8s 配置 cloud" (CSDN blog, 我的喵叫初六)

Paste the CA certificate into Jenkins:
[root@kube-20 ~]# cat /etc/kubernetes/pki/ca.crt
Install cfssl (binaries previously uploaded to /data/):
[root@kube-20 ~]# mkdir /data/
[root@kube-20 ~]# cd /data/
[root@kube-20 data]# chmod +x cfssl_1.6.1_linux_amd64
[root@kube-20 data]# chmod +x cfssljson_1.6.1_linux_amd64
[root@kube-20 data]# cp cfssl_1.6.1_linux_amd64 /usr/local/bin/cfssl
[root@kube-20 data]# cp cfssljson_1.6.1_linux_amd64 /usr/local/bin/cfssljson
Create admin-csr.json, then generate the certificate and private key:
cat > admin-csr.json << EOF
  {
      "CN":"admin",
      "key":{
          "algo":"rsa",
          "size":2048
      },
      "names":[
          {
              "C":"CN",
              "L":"GuangZhou",
              "ST":"GuangZhou",
              "O":"system:masters",
              "OU":"System"
          }
      ]
  }
EOF
[root@kube-20 pki]# cfssl gencert -ca=/etc/kubernetes/pki/ca.crt -ca-key=/etc/kubernetes/pki/ca.key --profile=kubernetes admin-csr.json | cfssljson -bare admin
Export the certificate as a PKCS#12 bundle for Jenkins:
[root@kube-20 pki]# openssl pkcs12 -export -out ./jenkins-admin.pfx -inkey ./admin-key.pem -in ./admin.pem -passout pass:123456


kubeadm certs check-expiration  # check certificate expiration
[root@kube-20 pki]# sz jenkins-admin.pfx   # transfer it to the local desktop

# Kubernetes URL: the k8s API server address (Jenkins operates the cluster through the apiserver); it can be found with:
[root@kube-20 ~]# kubectl cluster-info

Save. Additional clusters can be configured the same way.

3. Build

Tick this option


Fill in the details

[root@kube-20 pki]# cat ~/.kube/config

[root@kube-20 pki]# cat /etc/kubernetes/pki/ca.crt


Shell command

4. Install kubectl, docker, and git on the Jenkins node

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF


# Install kubectl 1.22.3 to match the cluster version
yum makecache --nogpgcheck

yum install -y kubectl-1.22.3 git
[root@jenkins-40 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2

[root@jenkins-40 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

[root@jenkins-40 ~]# sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo

[root@jenkins-40 ~]# yum makecache fast -y

[root@jenkins-40 ~]# yum -y install docker-ce


mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://niphmo8u.mirror.aliyuncs.com"]
}
EOF

systemctl daemon-reload
systemctl start docker
systemctl enable docker
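
For kubectl on the Jenkins host to actually reach the cluster it still needs credentials. One simple option (an assumption here, not shown in the original) is to reuse the admin kubeconfig from the master:

mkdir -p ~/.kube
scp root@192.168.188.20:/etc/kubernetes/admin.conf ~/.kube/config   # assumes root SSH access to the master
kubectl get no   # should list kube-20/21/22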

5. Test

Follow-up integration

  1. Create a build trigger by connecting GitLab to Jenkins via webhooks
  2. Stand up a private registry with Harbor or Aliyun Container Registry; extend the trigger's shell script to package an image, push it to the registry, and deploy to the k8s cluster, as the script below does
root@jenkins:~# ssh-keygen
root@jenkins:~# ssh-copy-id 10.0.0.101
# Deploy/rollback script; adapt it to your own release process
root@jenkins:/opt/app1# vim /data/scripts/test-job1.sh
#!/bin/bash

# Record the script start time
starttime=`date +'%Y-%m-%d %H:%M:%S'`

# Variables
SHELL_DIR="/root/scripts"
SHELL_NAME="$0"
K8S_CONTROLLER1="10.0.0.101"
K8S_CONTROLLER2="10.0.0.102"
DATE=`date +%Y-%m-%d_%H_%M_%S`
METHOD=$1
Branch=$2

echo $METHOD,$Branch

if test -z $Branch;then
  Branch=develop
fi


function Code_Clone(){
  Git_URL="git@10.0.0.31:test/app1.git"
  DIR_NAME=`echo ${Git_URL} |awk -F "/" '{print $2}' | awk -F "." '{print $1}'`
  DATA_DIR="/data/gitdata/test"
  Git_Dir="${DATA_DIR}/${DIR_NAME}"
  cd ${DATA_DIR} &&  echo "Removing the previous checkout and fetching the latest code" && sleep 1 && rm -rf ${DIR_NAME}
  echo "Cloning branch ${Branch}" && sleep 1
  git clone -b ${Branch} ${Git_URL}
  echo "Branch ${Branch} cloned; starting the build!" && sleep 1
  #cd ${Git_Dir} && mvn clean package
  #echo "Build finished; substituting IP addresses etc. for the test environment"
  #####################################################
  sleep 1
  cd ${Git_Dir}
  tar czf ${DIR_NAME}.tar.gz  ./*
}

# Copy the tarball to the k8s control host
function Copy_File(){
  echo "Tarball ready; copying to k8s control host ${K8S_CONTROLLER1}" && sleep 1
  scp ${Git_Dir}/${DIR_NAME}.tar.gz root@${K8S_CONTROLLER1}:/root/Dockerfile/tomcat
  echo "Copy complete; ${K8S_CONTROLLER1} will now build the Docker image!" && sleep 1
}

# Run the build script on the control host to build and push the image
function Make_Image(){
  echo "Building the Docker image and pushing it to the Harbor server" && sleep 1
  ssh root@${K8S_CONTROLLER1} "cd /root/Dockerfile/tomcat && bash build.sh ${DATE}"
  echo "Docker image built and pushed to Harbor" && sleep 1
}

# Update the image tag in the k8s yaml on the control host so the yaml matches what is running in k8s
function Update_k8s_yaml(){
  echo "Updating the image tag in the k8s yaml" && sleep 1
  ssh root@${K8S_CONTROLLER1} "cd /root/yaml/tomcat && sed -i 's/image: 10.0.0.*/image: 10.0.0.104\/baseimages\/tomcat:${DATE}/g' tomcat-app1.yaml"
  echo "yaml updated; rolling the new image out to the containers" && sleep 1
}

# Update the running containers; either set the image tag directly or apply the modified yaml
function Update_k8s_container(){
  # Option 1
  ssh root@${K8S_CONTROLLER1} "kubectl set image deployment/test-tomcat-app1-deployment  test-tomcat-app1-container=10.0.0.104/baseimages/tomcat:${DATE} -n test"
  # Option 2 (option 1 is preferred)
  #ssh root@${K8S_CONTROLLER1} "cd  /opt/k8s-data/yaml/magedu/tomcat-app1  && kubectl  apply -f tomcat-app1.yaml --record"
  echo "k8s image update complete" && sleep 1
  echo "Current image: 10.0.0.104/baseimages/tomcat:${DATE}"
  # Compute total runtime; remove the next four lines if not needed
  endtime=`date +'%Y-%m-%d %H:%M:%S'`
  start_seconds=$(date --date="$starttime" +%s);
  end_seconds=$(date --date="$endtime" +%s);
  echo "Total image update time: "$((end_seconds-start_seconds))"s"
}

# Roll back to the previous version using k8s built-in revision history
function rollback_last_version(){
  echo "Rolling back to the previous version"
  ssh root@${K8S_CONTROLLER1}  "kubectl rollout undo deployment/test-tomcat-app1-deployment  -n test"
  sleep 1
  echo "Rollback to the previous version executed"
}

# Usage
usage(){
  echo "Deploy:   ${SHELL_DIR}/${SHELL_NAME} deploy"
  echo "Rollback: ${SHELL_DIR}/${SHELL_NAME} rollback_last_version"
}

# Main
main(){
  case ${METHOD}  in
  deploy)
    Code_Clone;
    Copy_File;
    Make_Image;
    Update_k8s_yaml;
    Update_k8s_container;
  ;;
  rollback_last_version)
    rollback_last_version;
  ;;
  *)
    usage;
  esac;
}

main $1 $2


https://blog.csdn.net/weixin_43719988/article/details/121527727
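
In the Jenkins job this script is typically wired in as an 'Execute shell' build step; a minimal sketch (the path matches the vim command above, and the branch argument is optional, defaulting to develop):

bash /data/scripts/test-job1.sh deploy develop           # build and deploy the develop branch
# bash /data/scripts/test-job1.sh rollback_last_version  # roll back to the previous revision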