赞
踩
学习使用sealos部署k8s,发现目前大部分都是sealos3的文章,中间有几个小坑,记录一下最终成功的操作步骤。
前置准备
5台虚拟机
| hostname | ip | 类型 |
| --- | --- | --- |
k8s-master1 | 192.168.1.41 | 主节点 |
k8s-master2 | 192.168.1.42 | 主节点 |
k8s-master3 | 192.168.1.43 | 主节点 |
k8s-node1 | 192.168.1.46 | 工作点 |
k8s-node2 | 192.168.1.47 | 工作点 |
查看升级的镜像资源
http://elrepo.org/tiki/Download
# 清华节点
# Add the ELRepo repository (Tsinghua mirror) and install the long-term
# support kernel (kernel-lt).
rpm -Uvh https://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/elrepo-release-7.0-6.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt
# List the kernels referenced by the GRUB config to confirm the new one
grep initrd16 /boot/grub2/grub.cfg
# Boot the first menu entry; ELRepo installs put the new kernel at index 0
# -- verify against the grep output above before relying on this
grub2-set-default 0
# Disable SELinux for the current boot...
setenforce 0
# ...and permanently. Anchored pattern also handles SELINUX=permissive
# (the original only matched SELINUX=enforcing).
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
每台主机独立运行
# Replace "xxx" with the hostname for this node (e.g. k8s-master1);
# run on each host individually
hostnamectl set-hostname xxx
所有主机运行
# Overwrite /etc/hosts with the full cluster name map (run on every host);
# the quoted delimiter prevents any shell expansion inside the heredoc
cat > /etc/hosts <<-'EOF'
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.41 k8s-master1
192.168.1.42 k8s-master2
192.168.1.43 k8s-master3
192.168.1.46 k8s-node1
192.168.1.47 k8s-node2
EOF
yum install chrony -y
# Manual alternative: edit /etc/chrony.conf and set these three directives:
#   server 127.127.1.0 iburst
#   allow 192.168.1.0/24
#   local stratum 10
# Scripted equivalent below.
# Comment out the default upstream "server" lines. Anchoring at line start
# keeps comments untouched, and preserving the trailing space avoids the
# original's "# server0.centos..." mangling.
sed -i 's/^server /#server /' /etc/chrony.conf
# Use the local clock as the time source (inserted after line 6, right
# after the server block in the stock CentOS 7 config -- verify the line
# number if your config differs)
sed -i '6a\server 127.127.1.0 iburst' /etc/chrony.conf
# Allow NTP clients from 192.168.1.0/24. The stock config ships
# "#allow 192.168.0.0/16"; replace the whole line so the prefix length is
# the intended /24 (the original edit left the /16 in place).
sed -i 's|^#allow .*|allow 192.168.1.0/24|' /etc/chrony.conf
# Serve time even when not synchronised to a real upstream
sed -i 's/^#local /local /' /etc/chrony.conf
systemctl restart chronyd
systemctl enable chronyd --now
# Verify the configured time sources
chronyc sources
# 显示如下
210 Number of sources = 1
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^? 127.127.1.0 0 7 0 - +0ns[ +0ns] +/- 0ns
yum install chrony -y
# Manual alternative: edit /etc/chrony.conf and point "server" at the
# master node:
#   server 192.168.1.41 iburst
# Scripted equivalent below.
# Comment out the default upstream "server" lines (anchored at line start
# so comments stay untouched and the rest of the line is intact; the
# original unanchored pattern produced "# server0.centos...")
sed -i 's/^server /#server /' /etc/chrony.conf
# Sync from the local NTP server (k8s-master1)
sed -i '6a\server 192.168.1.41 iburst' /etc/chrony.conf
systemctl restart chronyd
systemctl enable chronyd --now
# Verify that k8s-master1 appears as a source
chronyc sources
# 显示如下
210 Number of sources = 1
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^? k8s-master1 0 6 0 - +0ns[ +0ns] +/- 0ns
chronyc -a makestep
# Disable swap now (kubelet refuses to start with swap enabled by default)...
swapoff -a
# ...and on boot: comment out swap entries in fstab. Only lines not already
# starting with '#' are touched, so re-running is idempotent (the original
# ".*swap.*" pattern double-commented lines on a second run).
sed -ri '/swap/{/^#/!s/^/#/}' /etc/fstab
设置 rsyslogd 和 systemd journald
# Persist journald logs to disk and cap their resource usage.
mkdir -p /var/log/journal # directory whose presence switches journald to persistent storage
# -p so the command succeeds if the drop-in directory already exists
# (plain mkdir fails on re-run)
mkdir -p /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# 持久化保存到磁盘
Storage=persistent
# 压缩历史日志
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# 最大占用空间 10G
SystemMaxUse=10G
# 单日志文件最大 200M
SystemMaxFileSize=200M
# 日志保存时间 2 周
MaxRetentionSec=2week
# 不将日志转发到 syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
# Reboot so the new kernel (grub2-set-default above) and SELinux/swap
# changes all take effect
reboot
# Load the bridge-netfilter module so bridged pod traffic traverses iptables
modprobe br_netfilter
# Write the module list used below; br_netfilter is included so it is also
# reloaded whenever this script runs again (the original only loaded it
# once interactively, and it was lost after the reboot above)
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- br_netfilter
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
sealos官方文档
sealos GitHub
获取最新的 releases 版本
将包解压,只需要解压后的sealos文件
# Extract only the sealos binary -- as noted above, nothing else in the
# release tarball is needed
tar -zxvf sealos_4.3.5_linux_amd64.tar.gz sealos
cp sealos /usr/bin/
# --debug prints every execution step; useful for learning and for
# troubleshooting failed installs
sealos run labring/kubernetes:v1.27.6 labring/helm:v3.12.3 labring/calico:v3.26.1 --masters 192.168.1.41 --nodes 192.168.1.46 --passwd 'kgb007' --debug
安装失败,清空集群,重新安装
# --force skips the confirmation prompt and wipes the cluster directly
sealos reset --force=true
执行日志
[root@k8s-master1 sealos]# sealos run labring/kubernetes:v1.27.6 labring/helm:v3.12.3 labring/calico:v3.26.1 --masters 192.168.1.41 --nodes 192.168.1.46 --passwd 'kgb007' --debug
2023-10-13T17:19:21 debug create new buildah config /etc/containers/policy.json cause it's not exist
2023-10-13T17:19:21 debug create new buildah config /etc/containers/registries.conf cause it's not exist
2023-10-13T17:19:21 debug using file /etc/containers/storage.conf as container storage config
2023-10-13T17:19:21 debug create new buildah config /etc/containers/storage.conf cause it's not exist
2023-10-13T17:19:21 debug creating new cluster
2023-10-13T17:19:21 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:19:21 debug defaultPort: 22
2023-10-13T17:19:21 debug start to exec `arch` on 192.168.1.46:22
2023-10-13T17:19:21 debug defaultPort: 22
2023-10-13T17:19:21 debug cluster info: apiVersion: apps.sealos.io/v1beta1
kind: Cluster
metadata:
creationTimestamp: null
name: default
spec:
hosts:
- ips:
- 192.168.1.41:22
roles:
- master
- amd64
- ips:
- 192.168.1.46:22
roles:
- node
- amd64
image:
- labring/kubernetes:v1.27.6
- labring/helm:v3.12.3
- labring/calico:v3.26.1
ssh:
passwd: kgb007
status: {}
2023-10-13T17:19:21 info Start to create a new cluster: master [192.168.1.41], worker [192.168.1.46], registry 192.168.1.41
2023-10-13T17:19:21 info Executing pipeline Check in CreateProcessor.
2023-10-13T17:19:21 info checker:hostname [192.168.1.41:22 192.168.1.46:22]
2023-10-13T17:19:21 debug start to exec remote 192.168.1.41:22 shell: hostname
2023-10-13T17:19:21 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:19:21 debug start to exec remote 192.168.1.46:22 shell: hostname
2023-10-13T17:19:21 debug start to exec `hostname` on 192.168.1.46:22
2023-10-13T17:19:21 info checker:timeSync [192.168.1.41:22 192.168.1.46:22]
2023-10-13T17:19:21 debug start to exec remote 192.168.1.41:22 shell: date +%s
2023-10-13T17:19:21 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:19:21 debug start to exec remote 192.168.1.46:22 shell: date +%s
2023-10-13T17:19:21 debug start to exec `date +%s` on 192.168.1.46:22
2023-10-13T17:19:22 info Executing pipeline PreProcess in CreateProcessor.
2023-10-13T17:19:22 debug cannot find image in local storage, trying to inspect from remote
2023-10-13T17:19:22 debug parse reference //labring/kubernetes:v1.27.6 with transport docker
Resolving "labring/kubernetes" using unqualified-search registries (/etc/containers/registries.conf)
Trying to pull docker.io/labring/kubernetes:v1.27.6...
Getting image source signatures
Copying blob 6dafa313b3ad done
Copying blob 3acc1385eb7c done
Copying blob 147e29300f47 done
Copying blob 3796a06c178b done
Copying config 2c7407e39c done
Writing manifest to image destination
Storing signatures
2023-10-13T17:19:54 debug images 2c7407e39c29a3c17b046a8d1635eb156cb8ab49df2e41e1f7063b3ff4ef4229 are pulled
2023-10-13T17:19:54 debug Pull Policy for pull [missing]
2023-10-13T17:19:54 debug parse reference 2c7407e39c29a3c17b046a8d1635eb156cb8ab49df2e41e1f7063b3ff4ef4229 with transport containers-storage
2023-10-13T17:19:54 debug cannot find image in local storage, trying to inspect from remote
2023-10-13T17:19:54 debug parse reference //labring/helm:v3.12.3 with transport docker
Resolving "labring/helm" using unqualified-search registries (/etc/containers/registries.conf)
Trying to pull docker.io/labring/helm:v3.12.3...
Getting image source signatures
Copying blob 4d7d76460a76 done
Copying config 446441537f done
Writing manifest to image destination
Storing signatures
2023-10-13T17:20:06 debug images 446441537f66d30e46d67a16c242e5b4de1b033524ee0c54370d09f5e0c4d3e0 are pulled
2023-10-13T17:20:06 debug Pull Policy for pull [missing]
2023-10-13T17:20:06 debug parse reference 446441537f66d30e46d67a16c242e5b4de1b033524ee0c54370d09f5e0c4d3e0 with transport containers-storage
2023-10-13T17:20:06 debug cannot find image in local storage, trying to inspect from remote
2023-10-13T17:20:06 debug parse reference //labring/calico:v3.26.1 with transport docker
Resolving "labring/calico" using unqualified-search registries (/etc/containers/registries.conf)
Trying to pull docker.io/labring/calico:v3.26.1...
Getting image source signatures
Copying blob 26a63b9f87d4 done
Copying config 1e8350ad92 done
Writing manifest to image destination
Storing signatures
2023-10-13T17:20:35 debug images 1e8350ad92f040e3b065783b8d42441f4c4673394d48d5219fd4d8c6aef00bfc are pulled
2023-10-13T17:20:35 debug Pull Policy for pull [missing]
2023-10-13T17:20:35 debug parse reference 1e8350ad92f040e3b065783b8d42441f4c4673394d48d5219fd4d8c6aef00bfc with transport containers-storage
2023-10-13T17:20:35 info Executing pipeline RunConfig in CreateProcessor.
2023-10-13T17:20:35 debug clusterfile config is empty!
2023-10-13T17:20:35 debug clusterfile config is empty!
2023-10-13T17:20:35 debug clusterfile config is empty!
2023-10-13T17:20:35 info Executing pipeline MountRootfs in CreateProcessor.
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/etc
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/etc
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/e2329b1df7f9a5888a4c01365cb8e7ddbb8f08d645dbb3faca11d9d568fbeb1b/merged/etc
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/e2329b1df7f9a5888a4c01365cb8e7ddbb8f08d645dbb3faca11d9d568fbeb1b/merged/scripts
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/e2329b1df7f9a5888a4c01365cb8e7ddbb8f08d645dbb3faca11d9d568fbeb1b/merged/manifests
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/scripts
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/manifests
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/scripts
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/manifests
2023-10-13T17:20:46 debug send mount image, target: 192.168.1.46:22, image: labring/kubernetes:v1.27.6, type: rootfs
2023-10-13T17:20:46 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/Kubefile to dst /var/lib/sealos/data/default/rootfs/Kubefile
2023-10-13T17:20:46 debug send mount image, target: 192.168.1.41:22, image: labring/kubernetes:v1.27.6, type: rootfs
2023-10-13T17:20:46 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/Kubefile to dst /var/lib/sealos/data/default/rootfs/Kubefile
2023-10-13T17:20:46 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/README.md to dst /var/lib/sealos/data/default/rootfs/README.md
2023-10-13T17:20:46 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/bin to dst /var/lib/sealos/data/default/rootfs/bin
2023-10-13T17:20:46 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/README.md to dst /var/lib/sealos/data/default/rootfs/README.md
2023-10-13T17:20:46 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/bin to dst /var/lib/sealos/data/default/rootfs/bin
[1/1]copying files to 192.168.1.46:22 20% [==> ] (1/5, 103 it/s) [0s:0s]2023-10-13T17:20:48 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/cri to dst /var/lib/sealos/data/default/rootfs/cri
2023-10-13T17:20:49 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/etc to dst /var/lib/sealos/data/default/rootfs/etc
2023-10-13T17:20:49 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/images to dst /var/lib/sealos/data/default/rootfs/images
2023-10-13T17:20:49 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/opt to dst /var/lib/sealos/data/default/rootfs/opt
2023-10-13T17:20:50 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/scripts to dst /var/lib/sealos/data/default/rootfs/scripts
2023-10-13T17:20:50 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/statics to dst /var/lib/sealos/data/default/rootfs/statics
2023-10-13T17:21:07 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/cri to dst /var/lib/sealos/data/default/rootfs/cri
2023-10-13T17:21:13 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/etc to dst /var/lib/sealos/data/default/rootfs/etc
2023-10-13T17:21:13 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/images to dst /var/lib/sealos/data/default/rootfs/images
2023-10-13T17:21:13 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/opt to dst /var/lib/sealos/data/default/rootfs/opt
2023-10-13T17:21:18 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/scripts to dst /var/lib/sealos/data/default/rootfs/scripts
2023-10-13T17:21:18 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/statics to dst /var/lib/sealos/data/default/rootfs/statics
2023-10-13T17:21:18 debug send mount image, target: 192.168.1.41:22, image: labring/calico:v3.26.1, type: application
2023-10-13T17:21:18 debug send mount image, target: 192.168.1.41:22, image: labring/helm:v3.12.3, type: application
2023-10-13T17:21:18 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/Kubefile to dst /var/lib/sealos/data/default/applications/default-xq4qhf1k/workdir/Kubefile
2023-10-13T17:21:18 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/e2329b1df7f9a5888a4c01365cb8e7ddbb8f08d645dbb3faca11d9d568fbeb1b/merged/opt to dst /var/lib/sealos/data/default/applications/default-mamxdvth/workdir/opt
2023-10-13T17:21:18 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/charts to dst /var/lib/sealos/data/default/applications/default-xq4qhf1k/workdir/charts
2023-10-13T17:21:18 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/images to dst /var/lib/sealos/data/default/applications/default-xq4qhf1k/workdir/images
2023-10-13T17:21:18 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/init.sh to dst /var/lib/sealos/data/default/applications/default-xq4qhf1k/workdir/init.sh
2023-10-13T17:21:18 info Executing pipeline MirrorRegistry in CreateProcessor.
2023-10-13T17:21:18 debug registry nodes is: [192.168.1.41:22]
2023-10-13T17:21:18 info trying default http mode to sync images to hosts [192.168.1.41:22]
2023-10-13T17:21:18 debug checking if endpoint http://192.168.1.41:5050 is alive
2023-10-13T17:21:18 debug running temporary registry on host 192.168.1.41:22
2023-10-13T17:21:18 debug start to run command `/var/lib/sealos/data/default/rootfs/opt/sealctl registry serve filesystem -p 5050 --disable-logging=true /var/lib/sealos/data/default/rootfs/registry` via exec
2023-10-13T17:21:18 debug http endpoint http://192.168.1.41:5050 is alive
2023-10-13T17:21:18 debug checking if endpoint http://127.0.0.1:37493 is alive
2023-10-13T17:21:18 debug checking if endpoint http://127.0.0.1:32835 is alive
2023-10-13T17:21:18 debug http endpoint http://127.0.0.1:37493 is alive
2023-10-13T17:21:18 debug http endpoint http://127.0.0.1:32835 is alive
2023-10-13T17:21:18 debug syncing repos [{coredns/coredns 0 false false false} {etcd 0 false false false} {kube-apiserver 0 false false false} {kube-controller-manager 0 false false false} {kube-proxy 0 false false false} {kube-scheduler 0 false false false} {labring/lvscare 0 false false false} {pause 0 false false false}] from 127.0.0.1:37493 to 192.168.1.41:5050
2023-10-13T17:21:18 debug syncing repos [{calico/apiserver 0 false false false} {calico/cni 0 false false false} {calico/csi 0 false false false} {calico/kube-controllers 0 false false false} {calico/node 0 false false false} {calico/node-driver-registrar 0 false false false} {calico/pod2daemon-flexvol 0 false false false} {calico/typha 0 false false false} {tigera/key-cert-provisioner 0 false false false} {tigera/operator 0 false false false}] from 127.0.0.1:32835 to 192.168.1.41:5050
2023-10-13T17:21:18 debug syncing 192.168.1.41:5050/calico/apiserver:v3.26.1 with selection 1
2023-10-13T17:21:18 debug syncing 192.168.1.41:5050/coredns/coredns:v1.10.1 with selection 1
2023-10-13T17:21:20 debug syncing 192.168.1.41:5050/coredns/coredns:v1.10.1 with selection 0
2023-10-13T17:21:20 debug syncing 192.168.1.41:5050/etcd:3.5.7-0 with selection 1
2023-10-13T17:21:22 debug syncing 192.168.1.41:5050/calico/apiserver:v3.26.1 with selection 0
2023-10-13T17:21:22 debug syncing 192.168.1.41:5050/calico/cni:v3.26.1 with selection 1
2023-10-13T17:21:24 debug syncing 192.168.1.41:5050/etcd:3.5.7-0 with selection 0
2023-10-13T17:21:24 debug syncing 192.168.1.41:5050/kube-apiserver:v1.27.6 with selection 1
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/calico/cni:v3.26.1 with selection 0
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/calico/csi:v3.26.1 with selection 1
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/kube-apiserver:v1.27.6 with selection 0
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/kube-controller-manager:v1.27.6 with selection 1
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/calico/csi:v3.26.1 with selection 0
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/calico/kube-controllers:v3.26.1 with selection 1
2023-10-13T17:21:27 debug syncing 192.168.1.41:5050/kube-controller-manager:v1.27.6 with selection 0
2023-10-13T17:21:27 debug syncing 192.168.1.41:5050/kube-proxy:v1.27.6 with selection 1
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/calico/kube-controllers:v3.26.1 with selection 0
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/calico/node:v3.26.1 with selection 1
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/kube-proxy:v1.27.6 with selection 0
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/kube-scheduler:v1.27.6 with selection 1
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/kube-scheduler:v1.27.6 with selection 0
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/labring/lvscare:v4.3.5 with selection 1
2023-10-13T17:21:29 debug syncing 192.168.1.41:5050/labring/lvscare:v4.3.5 with selection 0
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/pause:3.9 with selection 1
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/pause:3.9 with selection 0
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/calico/node:v3.26.1 with selection 0
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/calico/node-driver-registrar:v3.26.1 with selection 1
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/calico/node-driver-registrar:v3.26.1 with selection 0
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/calico/pod2daemon-flexvol:v3.26.1 with selection 1
2023-10-13T17:21:31 debug syncing 192.168.1.41:5050/calico/pod2daemon-flexvol:v3.26.1 with selection 0
2023-10-13T17:21:31 debug syncing 192.168.1.41:5050/calico/typha:v3.26.1 with selection 1
2023-10-13T17:21:32 debug syncing 192.168.1.41:5050/calico/typha:v3.26.1 with selection 0
2023-10-13T17:21:32 debug syncing 192.168.1.41:5050/tigera/key-cert-provisioner:v1.1.9 with selection 1
2023-10-13T17:21:32 debug syncing 192.168.1.41:5050/tigera/key-cert-provisioner:v1.1.9 with selection 0
2023-10-13T17:21:32 debug syncing 192.168.1.41:5050/tigera/operator:v1.30.4 with selection 1
2023-10-13T17:21:32 debug syncing 192.168.1.41:5050/tigera/operator:v1.30.4 with selection 0
2023-10-13T17:21:32 info Executing pipeline Bootstrap in CreateProcessor
2023-10-13T17:21:32 debug apply [default_checker registry_host_applier registry_applier initializer] on hosts [192.168.1.41:22 192.168.1.46:22]
2023-10-13T17:21:32 debug apply default_checker on host 192.168.1.46:22
2023-10-13T17:21:32 debug apply default_checker on host 192.168.1.41:22
2023-10-13T17:21:32 debug start to exec `cd /var/lib/sealos/data/default/rootfs/scripts && export registryPassword="passw0rd" registryUsername="admin" registryData="/var/lib/registry" SEALOS_SYS_KUBE_VERSION="v1.27.6" defaultVIP="10.103.97.2" registryDomain="sealos.hub" criData="/var/lib/containerd" SEALOS_SYS_CRI_ENDPOINT="/var/run/containerd/containerd.sock" SEALOS_SYS_SEALOS_VERSION="4.3.5" sandboxImage="pause:3.9" registryPort="5000" disableApparmor="false" SEALOS_SYS_IMAGE_ENDPOINT="/var/run/image-cri-shim.sock" registryConfig="/etc/registry" ; bash check.sh $registryData` on 192.168.1.46:22
2023-10-13T17:21:32 debug start to run command `cd /var/lib/sealos/data/default/rootfs/scripts && export registryPort="5000" registryUsername="admin" disableApparmor="false" SEALOS_SYS_SEALOS_VERSION="4.3.5" defaultVIP="10.103.97.2" registryDomain="sealos.hub" criData="/var/lib/containerd" registryPassword="passw0rd" sandboxImage="pause:3.9" registryConfig="/etc/registry" SEALOS_SYS_CRI_ENDPOINT="/var/run/containerd/containerd.sock" SEALOS_SYS_IMAGE_ENDPOINT="/var/run/image-cri-shim.sock" SEALOS_SYS_KUBE_VERSION="v1.27.6" registryData="/var/lib/registry" ; bash check.sh $registryData` via exec
INFO [2023-10-13 17:21:32] >> Check port kubelet port 10249..10259, reserved port 5050..5054 inuse. Please wait...
192.168.1.46:22 INFO [2023-10-13 17:21:32] >> Check port kubelet port 10249..10259, reserved port 5050..5054 inuse. Please wait...
which: no docker in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin)
WARN [2023-10-13 17:21:33] >> Replace disable_apparmor = false to disable_apparmor = true
INFO [2023-10-13 17:21:33] >> check root,port,cri success
192.168.1.46:22 which: no docker in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin)
192.168.1.46:22 WARN [2023-10-13 17:21:33] >> Replace disable_apparmor = false to disable_apparmor = true
192.168.1.46:22 INFO [2023-10-13 17:21:33] >> check root,port,cri success
2023-10-13T17:21:33 debug apply registry_host_applier on host 192.168.1.46:22
2023-10-13T17:21:33 debug apply registry_host_applier on host 192.168.1.41:22
2023-10-13T17:21:33 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:33 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:33 debug registry config data info: # Copyright © 2022 sealos.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
domain: sealos.hub
port: "5000"
username: "admin"
password: "passw0rd"
data: "/var/lib/registry"
2023-10-13T17:21:33 debug registry config data info: # Copyright © 2022 sealos.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
domain: sealos.hub
port: "5000"
username: "admin"
password: "passw0rd"
data: "/var/lib/registry"
2023-10-13T17:21:33 debug show registry info, IP: 192.168.1.41:22, Domain: sealos.hub, Data: /var/lib/registry
2023-10-13T17:21:33 debug show registry info, IP: 192.168.1.41:22, Domain: sealos.hub, Data: /var/lib/registry
2023-10-13T17:21:33 debug start to run command `/var/lib/sealos/data/default/rootfs/opt/sealctl hosts add --ip 192.168.1.41 --domain sealos.hub` via exec
2023-10-13T17:21:33 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl hosts add --ip 192.168.1.41 --domain sealos.hub` on 192.168.1.46:22
2023-10-13T17:21:33 info domain sealos.hub:192.168.1.41 append success
192.168.1.46:22 2023-10-13T17:21:33 info domain sealos.hub:192.168.1.41 append success
2023-10-13T17:21:34 debug apply registry_applier on host 192.168.1.41:22
2023-10-13T17:21:34 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:34 debug registry config data info: # Copyright © 2022 sealos.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
domain: sealos.hub
port: "5000"
username: "admin"
password: "passw0rd"
data: "/var/lib/registry"
2023-10-13T17:21:34 debug show registry info, IP: 192.168.1.41:22, Domain: sealos.hub, Data: /var/lib/registry
2023-10-13T17:21:34 debug make soft link: rm -rf /var/lib/registry && ln -s /var/lib/sealos/data/default/rootfs/registry /var/lib/registry
2023-10-13T17:21:34 debug start to run command `rm -rf /var/lib/registry && ln -s /var/lib/sealos/data/default/rootfs/registry /var/lib/registry` via exec
2023-10-13T17:21:34 debug local 192.168.1.41:22 copy files src /root/.sealos/default/etc/registry_htpasswd to dst /var/lib/sealos/data/default/rootfs/etc/registry_htpasswd
2023-10-13T17:21:34 debug start to run command `cd /var/lib/sealos/data/default/rootfs/scripts && export registryDomain="sealos.hub" disableApparmor="false" sandboxImage="pause:3.9" registryPort="5000" SEALOS_SYS_IMAGE_ENDPOINT="/var/run/image-cri-shim.sock" criData="/var/lib/containerd" SEALOS_SYS_CRI_ENDPOINT="/var/run/containerd/containerd.sock" registryConfig="/etc/registry" registryUsername="admin" defaultVIP="10.103.97.2" registryData="/var/lib/registry" SEALOS_SYS_KUBE_VERSION="v1.27.6" SEALOS_SYS_SEALOS_VERSION="4.3.5" registryPassword="passw0rd" ; bash init-registry.sh $registryData $registryConfig` via exec
Created symlink from /etc/systemd/system/multi-user.target.wants/registry.service to /etc/systemd/system/registry.service.
INFO [2023-10-13 17:21:34] >> Health check registry!
INFO [2023-10-13 17:21:34] >> registry is running
INFO [2023-10-13 17:21:34] >> init registry success
2023-10-13T17:21:34 debug apply initializer on host 192.168.1.46:22
2023-10-13T17:21:34 debug apply initializer on host 192.168.1.41:22
2023-10-13T17:21:34 debug start to run command `cd /var/lib/sealos/data/default/rootfs/scripts && export registryPassword="passw0rd" registryPort="5000" SEALOS_SYS_IMAGE_ENDPOINT="/var/run/image-cri-shim.sock" SEALOS_SYS_CRI_ENDPOINT="/var/run/containerd/containerd.sock" registryUsername="admin" SEALOS_SYS_SEALOS_VERSION="4.3.5" defaultVIP="10.103.97.2" registryDomain="sealos.hub" registryData="/var/lib/registry" criData="/var/lib/containerd" disableApparmor="false" sandboxImage="pause:3.9" registryConfig="/etc/registry" SEALOS_SYS_KUBE_VERSION="v1.27.6" ; bash init-cri.sh $registryDomain $registryPort && bash init.sh` via exec
2023-10-13T17:21:34 debug start to exec `cd /var/lib/sealos/data/default/rootfs/scripts && export registryPort="5000" registryUsername="admin" registryDomain="sealos.hub" registryPassword="passw0rd" SEALOS_SYS_IMAGE_ENDPOINT="/var/run/image-cri-shim.sock" registryData="/var/lib/registry" criData="/var/lib/containerd" disableApparmor="false" sandboxImage="pause:3.9" defaultVIP="10.103.97.2" SEALOS_SYS_CRI_ENDPOINT="/var/run/containerd/containerd.sock" SEALOS_SYS_KUBE_VERSION="v1.27.6" SEALOS_SYS_SEALOS_VERSION="4.3.5" registryConfig="/etc/registry" ; bash init-cri.sh $registryDomain $registryPort && bash init.sh` on 192.168.1.46:22
Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /etc/systemd/system/containerd.service.
192.168.1.46:22 Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /etc/systemd/system/containerd.service.
INFO [2023-10-13 17:21:38] >> Health check containerd!
INFO [2023-10-13 17:21:38] >> containerd is running
INFO [2023-10-13 17:21:38] >> init containerd success
Created symlink from /etc/systemd/system/multi-user.target.wants/image-cri-shim.service to /etc/systemd/system/image-cri-shim.service.
INFO [2023-10-13 17:21:38] >> Health check image-cri-shim!
INFO [2023-10-13 17:21:38] >> image-cri-shim is running
INFO [2023-10-13 17:21:38] >> init shim success
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.46:22 INFO [2023-10-13 17:21:38] >> Health check containerd!
192.168.1.46:22 INFO [2023-10-13 17:21:38] >> containerd is running
192.168.1.46:22 INFO [2023-10-13 17:21:38] >> init containerd success
192.168.1.46:22 Created symlink from /etc/systemd/system/multi-user.target.wants/image-cri-shim.service to /etc/systemd/system/image-cri-shim.service.
192.168.1.46:22 INFO [2023-10-13 17:21:38] >> Health check image-cri-shim!
192.168.1.46:22 INFO [2023-10-13 17:21:38] >> image-cri-shim is running
192.168.1.46:22 INFO [2023-10-13 17:21:38] >> init shim success
192.168.1.46:22 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
192.168.1.46:22 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
* Applying /usr/lib/sysctl.d/00-system.conf ...
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
fs.file-max = 1048576 # sealos
net.bridge.bridge-nf-call-ip6tables = 1 # sealos
net.bridge.bridge-nf-call-iptables = 1 # sealos
net.core.somaxconn = 65535 # sealos
net.ipv4.conf.all.rp_filter = 0 # sealos
net.ipv4.ip_forward = 1 # sealos
net.ipv4.ip_local_port_range = 1024 65535 # sealos
net.ipv4.tcp_keepalive_intvl = 30 # sealos
net.ipv4.tcp_keepalive_time = 600 # sealos
net.ipv4.vs.conn_reuse_mode = 0 # sealos
net.ipv4.vs.conntrack = 1 # sealos
net.ipv6.conf.all.forwarding = 1 # sealos
* Applying /etc/sysctl.conf ...
fs.file-max = 1048576 # sealos
net.bridge.bridge-nf-call-ip6tables = 1 # sealos
net.bridge.bridge-nf-call-iptables = 1 # sealos
net.core.somaxconn = 65535 # sealos
net.ipv4.conf.all.rp_filter = 0 # sealos
net.ipv4.ip_forward = 1 # sealos
net.ipv4.ip_local_port_range = 1024 65535 # sealos
net.ipv4.tcp_keepalive_intvl = 30 # sealos
net.ipv4.tcp_keepalive_time = 600 # sealos
net.ipv4.vs.conn_reuse_mode = 0 # sealos
net.ipv4.vs.conntrack = 1 # sealos
net.ipv6.conf.all.forwarding = 1 # sealos
INFO [2023-10-13 17:21:40] >> pull pause image sealos.hub:5000/pause:3.9
192.168.1.46:22 Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
192.168.1.46:22 Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
192.168.1.46:22 * Applying /usr/lib/sysctl.d/00-system.conf ...
192.168.1.46:22 net.bridge.bridge-nf-call-ip6tables = 0
192.168.1.46:22 net.bridge.bridge-nf-call-iptables = 0
192.168.1.46:22 net.bridge.bridge-nf-call-arptables = 0
192.168.1.46:22 * Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
192.168.1.46:22 kernel.yama.ptrace_scope = 0
192.168.1.46:22 * Applying /usr/lib/sysctl.d/50-default.conf ...
192.168.1.46:22 kernel.sysrq = 16
192.168.1.46:22 kernel.core_uses_pid = 1
192.168.1.46:22 kernel.kptr_restrict = 1
192.168.1.46:22 net.ipv4.conf.default.rp_filter = 1
192.168.1.46:22 net.ipv4.conf.all.rp_filter = 1
192.168.1.46:22 net.ipv4.conf.default.accept_source_route = 0
192.168.1.46:22 net.ipv4.conf.all.accept_source_route = 0
192.168.1.46:22 net.ipv4.conf.default.promote_secondaries = 1
192.168.1.46:22 net.ipv4.conf.all.promote_secondaries = 1
192.168.1.46:22 fs.protected_hardlinks = 1
192.168.1.46:22 fs.protected_symlinks = 1
192.168.1.46:22 * Applying /etc/sysctl.d/99-sysctl.conf ...
192.168.1.46:22 fs.file-max = 1048576 # sealos
192.168.1.46:22 net.bridge.bridge-nf-call-ip6tables = 1 # sealos
192.168.1.46:22 net.bridge.bridge-nf-call-iptables = 1 # sealos
192.168.1.46:22 net.core.somaxconn = 65535 # sealos
192.168.1.46:22 net.ipv4.conf.all.rp_filter = 0 # sealos
192.168.1.46:22 net.ipv4.ip_forward = 1 # sealos
192.168.1.46:22 net.ipv4.ip_local_port_range = 1024 65535 # sealos
192.168.1.46:22 net.ipv4.tcp_keepalive_intvl = 30 # sealos
192.168.1.46:22 net.ipv4.tcp_keepalive_time = 600 # sealos
192.168.1.46:22 net.ipv4.vs.conn_reuse_mode = 0 # sealos
192.168.1.46:22 net.ipv4.vs.conntrack = 1 # sealos
192.168.1.46:22 net.ipv6.conf.all.forwarding = 1 # sealos
192.168.1.46:22 * Applying /etc/sysctl.conf ...
192.168.1.46:22 fs.file-max = 1048576 # sealos
192.168.1.46:22 net.bridge.bridge-nf-call-ip6tables = 1 # sealos
192.168.1.46:22 net.bridge.bridge-nf-call-iptables = 1 # sealos
192.168.1.46:22 net.core.somaxconn = 65535 # sealos
192.168.1.46:22 net.ipv4.conf.all.rp_filter = 0 # sealos
192.168.1.46:22 net.ipv4.ip_forward = 1 # sealos
192.168.1.46:22 net.ipv4.ip_local_port_range = 1024 65535 # sealos
192.168.1.46:22 net.ipv4.tcp_keepalive_intvl = 30 # sealos
192.168.1.46:22 net.ipv4.tcp_keepalive_time = 600 # sealos
192.168.1.46:22 net.ipv4.vs.conn_reuse_mode = 0 # sealos
192.168.1.46:22 net.ipv4.vs.conntrack = 1 # sealos
192.168.1.46:22 net.ipv6.conf.all.forwarding = 1 # sealos
Image is up to date for sha256:e6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
INFO [2023-10-13 17:21:40] >> init kubelet success
INFO [2023-10-13 17:21:40] >> init rootfs success
192.168.1.46:22 INFO [2023-10-13 17:21:40] >> pull pause image sealos.hub:5000/pause:3.9
192.168.1.46:22 Image is up to date for sha256:e6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c
192.168.1.46:22 Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
192.168.1.46:22 INFO [2023-10-13 17:21:41] >> init kubelet success
192.168.1.46:22 INFO [2023-10-13 17:21:41] >> init rootfs success
2023-10-13T17:21:41 info Executing pipeline Init in CreateProcessor.
2023-10-13T17:21:41 info start to copy kubeadm config to master0
2023-10-13T17:21:41 debug using default kubeadm config
2023-10-13T17:21:41 debug skip merging kubeadm configs from cause file /var/lib/sealos/data/default/rootfs/etc/kubeadm.yml not exists
2023-10-13T17:21:41 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:21:41 debug get vip is 10.103.97.2
2023-10-13T17:21:41 debug start to exec remote 192.168.1.41:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl cri socket
2023-10-13T17:21:41 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:41 debug get nodes [192.168.1.41:22] cri socket is [/run/containerd/containerd.sock]
2023-10-13T17:21:41 debug node: 192.168.1.41:22 , criSocket: /run/containerd/containerd.sock
2023-10-13T17:21:41 debug start to exec remote 192.168.1.41:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl cri cgroup-driver --short
2023-10-13T17:21:41 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:41 debug get nodes [192.168.1.41:22] cgroup driver is [systemd]
2023-10-13T17:21:41 debug node: 192.168.1.41:22 , cGroupDriver: systemd
2023-10-13T17:21:41 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:21:41 debug get vip is 10.103.97.2
2023-10-13T17:21:41 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:21:41 debug get vip is 10.103.97.2
2023-10-13T17:21:41 debug override defaults of kubelet configuration
2023-10-13T17:21:41 debug local 192.168.1.41:22 copy files src /root/.sealos/default/tmp/kubeadm-init.yaml to dst /root/.sealos/default/etc/kubeadm-init.yaml
2023-10-13T17:21:41 info start to generate cert and kubeConfig...
2023-10-13T17:21:41 debug start to run command `rm -rf /etc/kubernetes/admin.conf` via exec
2023-10-13T17:21:41 info start to generator cert and copy to masters...
2023-10-13T17:21:41 debug start to exec remote 192.168.1.41:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl hostname
2023-10-13T17:21:41 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:42 info apiserver altNames : {map[apiserver.cluster.local:apiserver.cluster.local k8s-master1:k8s-master1 kubernetes:kubernetes kubernetes.default:kubernetes.default kubernetes.default.svc:kubernetes.default.svc kubernetes.default.svc.cluster.local:kubernetes.default.svc.cluster.local localhost:localhost] map[10.103.97.2:10.103.97.2 10.96.0.1:10.96.0.1 127.0.0.1:127.0.0.1 192.168.1.41:192.168.1.41]}
2023-10-13T17:21:42 info Etcd altnames : {map[k8s-master1:k8s-master1 localhost:localhost] map[127.0.0.1:127.0.0.1 192.168.1.41:192.168.1.41 ::1:::1]}, commonName : k8s-master1
2023-10-13T17:21:44 debug cert.GenerateCert getServiceCIDR 10.96.0.0/22
2023-10-13T17:21:44 debug cert.GenerateCert param: /root/.sealos/default/pki /root/.sealos/default/pki/etcd [127.0.0.1 apiserver.cluster.local 10.103.97.2 192.168.1.41] 192.168.1.41 k8s-master1 10.96.0.0/22 cluster.local
2023-10-13T17:21:44 info start to copy etc pki files to masters
2023-10-13T17:21:44 debug local 192.168.1.41:22 copy files src /root/.sealos/default/pki to dst /etc/kubernetes/pki
2023-10-13T17:21:44 info start to copy etc pki files to masters
2023-10-13T17:21:44 debug local 192.168.1.41:22 copy files src /root/.sealos/default/pki to dst /etc/kubernetes/pki
2023-10-13T17:21:44 info start to create kubeconfig...
2023-10-13T17:21:44 debug start to exec remote 192.168.1.41:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl hostname
2023-10-13T17:21:44 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:44 debug [kubeconfig] Writing "admin.conf" kubeconfig file
2023-10-13T17:21:45 debug [kubeconfig] Writing "controller-manager.conf" kubeconfig file
2023-10-13T17:21:45 debug [kubeconfig] Writing "scheduler.conf" kubeconfig file
2023-10-13T17:21:45 debug [kubeconfig] Writing "kubelet.conf" kubeconfig file
2023-10-13T17:21:45 info start to copy kubeconfig files to masters
2023-10-13T17:21:45 debug local 192.168.1.41:22 copy files src /root/.sealos/default/etc/admin.conf to dst /etc/kubernetes/admin.conf
2023-10-13T17:21:45 debug local 192.168.1.41:22 copy files src /root/.sealos/default/etc/controller-manager.conf to dst /etc/kubernetes/controller-manager.conf
2023-10-13T17:21:45 debug local 192.168.1.41:22 copy files src /root/.sealos/default/etc/scheduler.conf to dst /etc/kubernetes/scheduler.conf
2023-10-13T17:21:45 debug local 192.168.1.41:22 copy files src /root/.sealos/default/etc/kubelet.conf to dst /etc/kubernetes/kubelet.conf
2023-10-13T17:21:45 info start to copy static files to masters
2023-10-13T17:21:45 debug start to run command `mkdir -p /etc/kubernetes && cp -f /var/lib/sealos/data/default/rootfs/statics/audit-policy.yml /etc/kubernetes/audit-policy.yml` via exec
2023-10-13T17:21:45 info start to init master0...
2023-10-13T17:21:45 debug start to run command `/var/lib/sealos/data/default/rootfs/opt/sealctl hosts add --ip 192.168.1.41 --domain apiserver.cluster.local` via exec
2023-10-13T17:21:45 info domain apiserver.cluster.local:192.168.1.41 append success
2023-10-13T17:21:45 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:21:45 debug get vip is 10.103.97.2
2023-10-13T17:21:45 debug start to run command `kubeadm init --config=/root/.sealos/default/etc/kubeadm-init.yaml --skip-certificate-key-print --skip-token-print -v 6 --ignore-preflight-errors=SystemVerification` via exec
I1013 17:21:45.997108 1977 initconfiguration.go:255] loading configuration from "/root/.sealos/default/etc/kubeadm-init.yaml"
W1013 17:21:46.002647 1977 initconfiguration.go:306] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeproxy.config.k8s.io", Version:"v1alpha1", Kind:"KubeProxyConfiguration"}: strict decoding error: unknown field "udpIdleTimeout"
W1013 17:21:46.003200 1977 configset.go:177] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeproxy.config.k8s.io", Version:"v1alpha1", Kind:"KubeProxyConfiguration"}: strict decoding error: unknown field "udpIdleTimeout"
W1013 17:21:46.004933 1977 initconfiguration.go:120] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/run/containerd/containerd.sock". Please update your configuration!
W1013 17:21:46.004986 1977 utils.go:69] The recommended value for "healthzBindAddress" in "KubeletConfiguration" is: 127.0.0.1; the provided value is: 0.0.0.0
I1013 17:21:46.009210 1977 certs.go:519] validating certificate period for CA certificate
I1013 17:21:46.009356 1977 certs.go:519] validating certificate period for front-proxy CA certificate
[init] Using Kubernetes version: v1.27.6
[preflight] Running pre-flight checks
I1013 17:21:46.009548 1977 checks.go:563] validating Kubernetes and kubeadm version
I1013 17:21:46.009595 1977 checks.go:168] validating if the firewall is enabled and active
I1013 17:21:46.019772 1977 checks.go:203] validating availability of port 6443
I1013 17:21:46.019967 1977 checks.go:203] validating availability of port 10259
I1013 17:21:46.020034 1977 checks.go:203] validating availability of port 10257
I1013 17:21:46.020102 1977 checks.go:280] validating the existence of file /etc/kubernetes/manifests/kube-apiserver.yaml
I1013 17:21:46.020143 1977 checks.go:280] validating the existence of file /etc/kubernetes/manifests/kube-controller-manager.yaml
I1013 17:21:46.020176 1977 checks.go:280] validating the existence of file /etc/kubernetes/manifests/kube-scheduler.yaml
I1013 17:21:46.020189 1977 checks.go:280] validating the existence of file /etc/kubernetes/manifests/etcd.yaml
I1013 17:21:46.020201 1977 checks.go:430] validating if the connectivity type is via proxy or direct
I1013 17:21:46.020233 1977 checks.go:469] validating http connectivity to first IP address in the CIDR
I1013 17:21:46.020312 1977 checks.go:469] validating http connectivity to first IP address in the CIDR
I1013 17:21:46.020360 1977 checks.go:104] validating the container runtime
I1013 17:21:46.048194 1977 checks.go:639] validating whether swap is enabled or not
I1013 17:21:46.048292 1977 checks.go:370] validating the presence of executable crictl
I1013 17:21:46.048328 1977 checks.go:370] validating the presence of executable conntrack
I1013 17:21:46.048368 1977 checks.go:370] validating the presence of executable ip
I1013 17:21:46.048396 1977 checks.go:370] validating the presence of executable iptables
I1013 17:21:46.048425 1977 checks.go:370] validating the presence of executable mount
I1013 17:21:46.048460 1977 checks.go:370] validating the presence of executable nsenter
I1013 17:21:46.048541 1977 checks.go:370] validating the presence of executable ebtables
I1013 17:21:46.048572 1977 checks.go:370] validating the presence of executable ethtool
I1013 17:21:46.048627 1977 checks.go:370] validating the presence of executable socat
[WARNING FileExisting-socat]: socat not found in system path
I1013 17:21:46.048709 1977 checks.go:370] validating the presence of executable tc
I1013 17:21:46.048755 1977 checks.go:370] validating the presence of executable touch
I1013 17:21:46.048788 1977 checks.go:516] running all checks
I1013 17:21:46.059477 1977 checks.go:401] checking whether the given node name is valid and reachable using net.LookupHost
I1013 17:21:46.059748 1977 checks.go:605] validating kubelet version
I1013 17:21:46.123390 1977 checks.go:130] validating if the "kubelet" service is enabled and active
I1013 17:21:46.132643 1977 checks.go:203] validating availability of port 10250
I1013 17:21:46.132733 1977 checks.go:329] validating the contents of file /proc/sys/net/bridge/bridge-nf-call-iptables
I1013 17:21:46.132811 1977 checks.go:329] validating the contents of file /proc/sys/net/ipv4/ip_forward
I1013 17:21:46.132881 1977 checks.go:203] validating availability of port 2379
I1013 17:21:46.132945 1977 checks.go:203] validating availability of port 2380
I1013 17:21:46.133008 1977 checks.go:243] validating the existence and emptiness of directory /var/lib/etcd
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I1013 17:21:46.133152 1977 checks.go:828] using image pull policy: IfNotPresent
I1013 17:21:46.247344 1977 checks.go:854] pulling: registry.k8s.io/kube-apiserver:v1.27.6
I1013 17:21:48.929039 1977 checks.go:854] pulling: registry.k8s.io/kube-controller-manager:v1.27.6
I1013 17:21:50.904674 1977 checks.go:854] pulling: registry.k8s.io/kube-scheduler:v1.27.6
I1013 17:21:52.250698 1977 checks.go:854] pulling: registry.k8s.io/kube-proxy:v1.27.6
W1013 17:21:54.076415 1977 checks.go:835] detected that the sandbox image "sealos.hub:5000/pause:3.9" of the container runtime is inconsistent with that used by kubeadm. It is recommended that using "registry.k8s.io/pause:3.9" as the CRI sandbox image.
I1013 17:21:54.218705 1977 checks.go:846] image exists: registry.k8s.io/pause:3.9
I1013 17:21:54.329121 1977 checks.go:854] pulling: registry.k8s.io/etcd:3.5.7-0
I1013 17:21:59.222597 1977 checks.go:854] pulling: registry.k8s.io/coredns/coredns:v1.10.1
[certs] Using certificateDir folder "/etc/kubernetes/pki"
I1013 17:22:00.424183 1977 certs.go:519] validating certificate period for ca certificate
[certs] Using existing ca certificate authority
I1013 17:22:00.424855 1977 certs.go:519] validating certificate period for apiserver certificate
[certs] Using existing apiserver certificate and key on disk
I1013 17:22:00.425468 1977 certs.go:519] validating certificate period for apiserver-kubelet-client certificate
[certs] Using existing apiserver-kubelet-client certificate and key on disk
I1013 17:22:00.426055 1977 certs.go:519] validating certificate period for front-proxy-ca certificate
[certs] Using existing front-proxy-ca certificate authority
I1013 17:22:00.426638 1977 certs.go:519] validating certificate period for front-proxy-client certificate
[certs] Using existing front-proxy-client certificate and key on disk
I1013 17:22:00.427209 1977 certs.go:519] validating certificate period for etcd/ca certificate
[certs] Using existing etcd/ca certificate authority
I1013 17:22:00.427832 1977 certs.go:519] validating certificate period for etcd/server certificate
[certs] Using existing etcd/server certificate and key on disk
I1013 17:22:00.428411 1977 certs.go:519] validating certificate period for etcd/peer certificate
[certs] Using existing etcd/peer certificate and key on disk
I1013 17:22:00.428947 1977 certs.go:519] validating certificate period for etcd/healthcheck-client certificate
[certs] Using existing etcd/healthcheck-client certificate and key on disk
I1013 17:22:00.429531 1977 certs.go:519] validating certificate period for apiserver-etcd-client certificate
[certs] Using existing apiserver-etcd-client certificate and key on disk
I1013 17:22:00.430061 1977 certs.go:78] creating new public/private key files for signing service account users
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1013 17:22:00.430595 1977 kubeconfig.go:103] creating kubeconfig file for admin.conf
I1013 17:22:00.520018 1977 loader.go:373] Config loaded from file: /etc/kubernetes/admin.conf
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
I1013 17:22:00.520076 1977 kubeconfig.go:103] creating kubeconfig file for kubelet.conf
I1013 17:22:00.745409 1977 loader.go:373] Config loaded from file: /etc/kubernetes/kubelet.conf
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
I1013 17:22:00.745501 1977 kubeconfig.go:103] creating kubeconfig file for controller-manager.conf
I1013 17:22:00.916114 1977 loader.go:373] Config loaded from file: /etc/kubernetes/controller-manager.conf
W1013 17:22:00.916161 1977 kubeconfig.go:264] a kubeconfig file "/etc/kubernetes/controller-manager.conf" exists already but has an unexpected API Server URL: expected: https://192.168.1.41:6443, got: https://apiserver.cluster.local:6443
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
I1013 17:22:00.916192 1977 kubeconfig.go:103] creating kubeconfig file for scheduler.conf
I1013 17:22:01.073906 1977 loader.go:373] Config loaded from file: /etc/kubernetes/scheduler.conf
W1013 17:22:01.073948 1977 kubeconfig.go:264] a kubeconfig file "/etc/kubernetes/scheduler.conf" exists already but has an unexpected API Server URL: expected: https://192.168.1.41:6443, got: https://apiserver.cluster.local:6443
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
I1013 17:22:01.073985 1977 kubelet.go:67] Stopping the kubelet
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
I1013 17:22:01.200938 1977 manifests.go:99] [control-plane] getting StaticPodSpecs
I1013 17:22:01.201293 1977 manifests.go:125] [control-plane] adding volume "audit" for component "kube-apiserver"
I1013 17:22:01.201312 1977 manifests.go:125] [control-plane] adding volume "audit-log" for component "kube-apiserver"
I1013 17:22:01.201324 1977 manifests.go:125] [control-plane] adding volume "ca-certs" for component "kube-apiserver"
I1013 17:22:01.201334 1977 manifests.go:125] [control-plane] adding volume "etc-pki" for component "kube-apiserver"
I1013 17:22:01.201346 1977 manifests.go:125] [control-plane] adding volume "k8s-certs" for component "kube-apiserver"
I1013 17:22:01.201356 1977 manifests.go:125] [control-plane] adding volume "localtime" for component "kube-apiserver"
I1013 17:22:01.205107 1977 manifests.go:154] [control-plane] wrote static Pod manifest for component "kube-apiserver" to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
I1013 17:22:01.205149 1977 manifests.go:99] [control-plane] getting StaticPodSpecs
I1013 17:22:01.206120 1977 manifests.go:125] [control-plane] adding volume "ca-certs" for component "kube-controller-manager"
I1013 17:22:01.206141 1977 manifests.go:125] [control-plane] adding volume "etc-pki" for component "kube-controller-manager"
I1013 17:22:01.206151 1977 manifests.go:125] [control-plane] adding volume "flexvolume-dir" for component "kube-controller-manager"
I1013 17:22:01.206161 1977 manifests.go:125] [control-plane] adding volume "k8s-certs" for component "kube-controller-manager"
I1013 17:22:01.206171 1977 manifests.go:125] [control-plane] adding volume "kubeconfig" for component "kube-controller-manager"
I1013 17:22:01.206181 1977 manifests.go:125] [control-plane] adding volume "localtime" for component "kube-controller-manager"
I1013 17:22:01.206956 1977 manifests.go:154] [control-plane] wrote static Pod manifest for component "kube-controller-manager" to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[control-plane] Creating static Pod manifest for "kube-scheduler"
I1013 17:22:01.206979 1977 manifests.go:99] [control-plane] getting StaticPodSpecs
I1013 17:22:01.207255 1977 manifests.go:125] [control-plane] adding volume "kubeconfig" for component "kube-scheduler"
I1013 17:22:01.207271 1977 manifests.go:125] [control-plane] adding volume "localtime" for component "kube-scheduler"
I1013 17:22:01.207757 1977 manifests.go:154] [control-plane] wrote static Pod manifest for component "kube-scheduler" to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1013 17:22:01.208568 1977 local.go:65] [etcd] wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml"
I1013 17:22:01.208585 1977 waitcontrolplane.go:83] [wait-control-plane] Waiting for the API server to be healthy
I1013 17:22:01.208999 1977 loader.go:373] Config loaded from file: /etc/kubernetes/admin.conf
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1013 17:22:01.210509 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s in 0 milliseconds
I1013 17:22:01.711816 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s in 0 milliseconds
I1013 17:22:02.211939 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s in 0 milliseconds
I1013 17:22:02.712126 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s in 0 milliseconds
I1013 17:22:03.211976 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s in 0 milliseconds
I1013 17:22:05.804729 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s 500 Internal Server Error in 2093 milliseconds
I1013 17:22:06.212873 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s 500 Internal Server Error in 1 milliseconds
I1013 17:22:06.712275 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s 500 Internal Server Error in 1 milliseconds
I1013 17:22:07.212989 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s 500 Internal Server Error in 2 milliseconds
I1013 17:22:07.715485 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s 200 OK in 1 milliseconds
[apiclient] All control plane components are healthy after 6.505532 seconds
I1013 17:22:07.715580 1977 uploadconfig.go:112] [upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1013 17:22:07.721197 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 4 milliseconds
I1013 17:22:07.726186 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles?timeout=10s 201 Created in 4 milliseconds
I1013 17:22:07.733694 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s 201 Created in 5 milliseconds
I1013 17:22:07.733941 1977 uploadconfig.go:126] [upload-config] Uploading the kubelet component config to a ConfigMap
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1013 17:22:07.742224 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 7 milliseconds
I1013 17:22:07.749284 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles?timeout=10s 201 Created in 6 milliseconds
I1013 17:22:07.754215 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s 201 Created in 4 milliseconds
I1013 17:22:07.754406 1977 uploadconfig.go:131] [upload-config] Preserving the CRISocket information for the control-plane node
I1013 17:22:07.754424 1977 patchnode.go:31] [patchnode] Uploading the CRI Socket information "unix:///run/containerd/containerd.sock" to the Node API object "k8s-master1" as an annotation
I1013 17:22:08.257674 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-master1?timeout=10s 200 OK in 2 milliseconds
I1013 17:22:08.265524 1977 round_trippers.go:553] PATCH https://apiserver.cluster.local:6443/api/v1/nodes/k8s-master1?timeout=10s 200 OK in 5 milliseconds
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master1 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
I1013 17:22:08.769230 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-master1?timeout=10s 200 OK in 2 milliseconds
I1013 17:22:08.776252 1977 round_trippers.go:553] PATCH https://apiserver.cluster.local:6443/api/v1/nodes/k8s-master1?timeout=10s 200 OK in 5 milliseconds
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1013 17:22:08.778993 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/secrets/bootstrap-token-5w92af?timeout=10s 404 Not Found in 2 milliseconds
I1013 17:22:08.783684 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/secrets?timeout=10s 201 Created in 4 milliseconds
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1013 17:22:08.786779 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterroles?timeout=10s 201 Created in 2 milliseconds
I1013 17:22:08.790016 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 2 milliseconds
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1013 17:22:08.793691 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 3 milliseconds
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1013 17:22:08.796556 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 2 milliseconds
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1013 17:22:08.799523 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 2 milliseconds
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1013 17:22:08.799657 1977 clusterinfo.go:47] [bootstrap-token] loading admin kubeconfig
I1013 17:22:08.800137 1977 loader.go:373] Config loaded from file: /etc/kubernetes/admin.conf
I1013 17:22:08.800155 1977 clusterinfo.go:58] [bootstrap-token] copying the cluster from admin.conf to the bootstrap kubeconfig
I1013 17:22:08.800626 1977 clusterinfo.go:70] [bootstrap-token] creating/updating ConfigMap in kube-public namespace
I1013 17:22:08.803595 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-public/configmaps?timeout=10s 201 Created in 2 milliseconds
I1013 17:22:08.803771 1977 clusterinfo.go:84] creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace
I1013 17:22:08.807677 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:08.810897 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:08.811062 1977 kubeletfinalize.go:90] [kubelet-finalize] Assuming that kubelet client certificate rotation is enabled: found "/var/lib/kubelet/pki/kubelet-client-current.pem"
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1013 17:22:08.811619 1977 loader.go:373] Config loaded from file: /etc/kubernetes/kubelet.conf
I1013 17:22:08.812170 1977 kubeletfinalize.go:134] [kubelet-finalize] Restarting the kubelet to enable client certificate rotation
I1013 17:22:09.023934 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/apis/apps/v1/namespaces/kube-system/deployments?labelSelector=k8s-app%3Dkube-dns 200 OK in 2 milliseconds
I1013 17:22:09.029246 1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps/coredns?timeout=10s 404 Not Found in 3 milliseconds
I1013 17:22:09.033231 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:09.036216 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterroles?timeout=10s 201 Created in 2 milliseconds
I1013 17:22:09.039259 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 2 milliseconds
I1013 17:22:09.042499 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/serviceaccounts?timeout=10s 201 Created in 2 milliseconds
I1013 17:22:09.050598 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/apps/v1/namespaces/kube-system/deployments?timeout=10s 201 Created in 7 milliseconds
I1013 17:22:09.058045 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/services?timeout=10s 201 Created in 6 milliseconds
[addons] Applied essential addon: CoreDNS
I1013 17:22:09.062452 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:09.071950 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/apps/v1/namespaces/kube-system/daemonsets?timeout=10s 201 Created in 8 milliseconds
I1013 17:22:09.075775 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/serviceaccounts?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:09.078976 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:09.085983 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles?timeout=10s 201 Created in 6 milliseconds
I1013 17:22:09.186601 1977 request.go:628] Waited for 100.309979ms due to client-side throttling, not priority and fairness, request: POST:https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s
I1013 17:22:09.191432 1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s 201 Created in 4 milliseconds
[addons] Applied essential addon: kube-proxy
I1013 17:22:09.192110 1977 loader.go:373] Config loaded from file: /etc/kubernetes/admin.conf
I1013 17:22:09.192679 1977 loader.go:373] Config loaded from file: /etc/kubernetes/admin.conf
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join apiserver.cluster.local:6443 --token <value withheld> \
--discovery-token-ca-cert-hash sha256:8a17edfd9a3ac44f2786991601c9fd13e8ff7f915b2a8f3bb45ac6aadca3ba06 \
--control-plane --certificate-key <value withheld>
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join apiserver.cluster.local:6443 --token <value withheld> \
--discovery-token-ca-cert-hash sha256:8a17edfd9a3ac44f2786991601c9fd13e8ff7f915b2a8f3bb45ac6aadca3ba06
2023-10-13T17:22:09 debug start to run command `rm -rf $HOME/.kube/config && mkdir -p $HOME/.kube && cp /etc/kubernetes/admin.conf $HOME/.kube/config` via exec
2023-10-13T17:22:09 info Executing pipeline Join in CreateProcessor.
2023-10-13T17:22:09 info [192.168.1.46:22] will be added as worker
2023-10-13T17:22:09 info start to get kubernetes token...
2023-10-13T17:22:09 debug start to exec remote 192.168.1.41:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl token /root/.sealos/default/etc/kubeadm-init.yaml 386a68505a2ee5995726c30ba3c5e5ecd03e401d97bfe66c039510b7826bc21c
2023-10-13T17:22:09 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:22:09 info fetch certSANs from kubeadm configmap
2023-10-13T17:22:09 debug current cluster config data: map[apiServer:map[certSANs:[127.0.0.1 apiserver.cluster.local 10.103.97.2 192.168.1.41] extraArgs:map[audit-log-format:json audit-log-maxage:7 audit-log-maxbackup:10 audit-log-maxsize:100 audit-log-path:/var/log/kubernetes/audit.log audit-policy-file:/etc/kubernetes/audit-policy.yml authorization-mode:Node,RBAC enable-aggregator-routing:true feature-gates:] extraVolumes:[map[hostPath:/etc/kubernetes mountPath:/etc/kubernetes name:audit pathType:DirectoryOrCreate] map[hostPath:/var/log/kubernetes mountPath:/var/log/kubernetes name:audit-log pathType:DirectoryOrCreate] map[hostPath:/etc/localtime mountPath:/etc/localtime name:localtime pathType:File readOnly:true]] timeoutForControlPlane:4m0s] apiVersion:kubeadm.k8s.io/v1beta3 certificatesDir:/etc/kubernetes/pki clusterName:kubernetes controlPlaneEndpoint:apiserver.cluster.local:6443 controllerManager:map[extraArgs:map[bind-address:0.0.0.0 cluster-signing-duration:876000h feature-gates:] extraVolumes:[map[hostPath:/etc/localtime mountPath:/etc/localtime name:localtime pathType:File readOnly:true]]] dns:map[] etcd:map[local:map[dataDir:/var/lib/etcd extraArgs:map[listen-metrics-urls:http://0.0.0.0:2381]]] imageRepository:registry.k8s.io kind:ClusterConfiguration kubernetesVersion:v1.27.6 networking:map[dnsDomain:cluster.local podSubnet:100.64.0.0/10 serviceSubnet:10.96.0.0/22] scheduler:map[extraArgs:map[bind-address:0.0.0.0 feature-gates:] extraVolumes:[map[hostPath:/etc/localtime mountPath:/etc/localtime name:localtime pathType:File readOnly:true]]]]
2023-10-13T17:22:09 debug current cluster certSANs: [127.0.0.1 apiserver.cluster.local 10.103.97.2 192.168.1.41]
2023-10-13T17:22:09 info start to join 192.168.1.46:22 as worker
2023-10-13T17:22:09 info start to copy kubeadm join config to node: 192.168.1.46:22
2023-10-13T17:22:09 debug using default kubeadm config
2023-10-13T17:22:09 debug skip merging kubeadm configs from cause file /var/lib/sealos/data/default/rootfs/etc/kubeadm.yml not exists
2023-10-13T17:22:09 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:09 debug get vip is 10.103.97.2
2023-10-13T17:22:09 debug start to exec remote 192.168.1.46:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl cri socket
2023-10-13T17:22:09 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl cri socket` on 192.168.1.46:22
2023-10-13T17:22:09 debug get nodes [192.168.1.46:22] cri socket is [/run/containerd/containerd.sock]
2023-10-13T17:22:09 debug node: 192.168.1.46:22 , criSocket: /run/containerd/containerd.sock
2023-10-13T17:22:09 debug start to exec remote 192.168.1.46:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl cri cgroup-driver --short
2023-10-13T17:22:09 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl cri cgroup-driver --short` on 192.168.1.46:22
2023-10-13T17:22:10 debug get nodes [192.168.1.46:22] cgroup driver is [systemd]
2023-10-13T17:22:10 debug node: 192.168.1.46:22 , cGroupDriver: systemd
2023-10-13T17:22:10 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:10 debug get vip is 10.103.97.2
2023-10-13T17:22:10 debug remote copy files src /root/.sealos/default/tmp/kubeadm-join-node.yaml to dst /root/.sealos/default/etc/kubeadm-join-node.yaml
2023-10-13T17:22:10 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:10 debug get vip is 10.103.97.2
2023-10-13T17:22:10 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl hosts add --ip 10.103.97.2 --domain apiserver.cluster.local` on 192.168.1.46:22
192.168.1.46:22 2023-10-13T17:22:10 info domain apiserver.cluster.local:10.103.97.2 append success
2023-10-13T17:22:10 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl hosts add --ip 192.168.1.46 --domain lvscare.node.ip` on 192.168.1.46:22
192.168.1.46:22 2023-10-13T17:22:10 info domain lvscare.node.ip:192.168.1.46 append success
2023-10-13T17:22:10 info run ipvs once module: 192.168.1.46:22
2023-10-13T17:22:10 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:10 debug get vip is 10.103.97.2
2023-10-13T17:22:10 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl ipvs --vs 10.103.97.2:6443 --rs 192.168.1.41:6443 --health-path /healthz --health-schem https --run-once` on 192.168.1.46:22
192.168.1.46:22 2023-10-13T17:22:10 info Trying to add route
192.168.1.46:22 2023-10-13T17:22:10 info success to set route.(host:10.103.97.2, gateway:192.168.1.46)
2023-10-13T17:22:10 info start join node: 192.168.1.46:22
2023-10-13T17:22:10 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:10 debug get vip is 10.103.97.2
2023-10-13T17:22:10 debug start to exec `kubeadm join --config=/root/.sealos/default/etc/kubeadm-join-node.yaml -v 6` on 192.168.1.46:22
192.168.1.46:22 I1013 17:22:10.561231 1954 join.go:412] [preflight] found NodeName empty; using OS hostname as NodeName
192.168.1.46:22 I1013 17:22:10.561310 1954 joinconfiguration.go:76] loading configuration from "/root/.sealos/default/etc/kubeadm-join-node.yaml"
192.168.1.46:22 W1013 17:22:10.562477 1954 initconfiguration.go:120] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/run/containerd/containerd.sock". Please update your configuration!
192.168.1.46:22 [preflight] Running pre-flight checks
192.168.1.46:22 I1013 17:22:10.562620 1954 preflight.go:93] [preflight] Running general checks
192.168.1.46:22 I1013 17:22:10.562687 1954 checks.go:280] validating the existence of file /etc/kubernetes/kubelet.conf
192.168.1.46:22 I1013 17:22:10.562705 1954 checks.go:280] validating the existence of file /etc/kubernetes/bootstrap-kubelet.conf
192.168.1.46:22 I1013 17:22:10.562725 1954 checks.go:104] validating the container runtime
192.168.1.46:22 I1013 17:22:10.592094 1954 checks.go:639] validating whether swap is enabled or not
192.168.1.46:22 I1013 17:22:10.592189 1954 checks.go:370] validating the presence of executable crictl
192.168.1.46:22 I1013 17:22:10.592246 1954 checks.go:370] validating the presence of executable conntrack
192.168.1.46:22 I1013 17:22:10.592312 1954 checks.go:370] validating the presence of executable ip
192.168.1.46:22 I1013 17:22:10.592364 1954 checks.go:370] validating the presence of executable iptables
192.168.1.46:22 I1013 17:22:10.592418 1954 checks.go:370] validating the presence of executable mount
192.168.1.46:22 I1013 17:22:10.592476 1954 checks.go:370] validating the presence of executable nsenter
192.168.1.46:22 I1013 17:22:10.592564 1954 checks.go:370] validating the presence of executable ebtables
192.168.1.46:22 I1013 17:22:10.592617 1954 checks.go:370] validating the presence of executable ethtool
192.168.1.46:22 I1013 17:22:10.592680 1954 checks.go:370] validating the presence of executable socat
192.168.1.46:22 [WARNING FileExisting-socat]: socat not found in system path
192.168.1.46:22 I1013 17:22:10.592801 1954 checks.go:370] validating the presence of executable tc
192.168.1.46:22 I1013 17:22:10.592854 1954 checks.go:370] validating the presence of executable touch
192.168.1.46:22 I1013 17:22:10.592921 1954 checks.go:516] running all checks
192.168.1.46:22 I1013 17:22:10.604349 1954 checks.go:401] checking whether the given node name is valid and reachable using net.LookupHost
192.168.1.46:22 I1013 17:22:10.604655 1954 checks.go:605] validating kubelet version
192.168.1.46:22 I1013 17:22:10.671270 1954 checks.go:130] validating if the "kubelet" service is enabled and active
192.168.1.46:22 I1013 17:22:10.681081 1954 checks.go:203] validating availability of port 10250
192.168.1.46:22 I1013 17:22:10.681276 1954 checks.go:280] validating the existence of file /etc/kubernetes/pki/ca.crt
192.168.1.46:22 I1013 17:22:10.681357 1954 checks.go:430] validating if the connectivity type is via proxy or direct
192.168.1.46:22 I1013 17:22:10.681404 1954 checks.go:329] validating the contents of file /proc/sys/net/bridge/bridge-nf-call-iptables
192.168.1.46:22 I1013 17:22:10.681473 1954 checks.go:329] validating the contents of file /proc/sys/net/ipv4/ip_forward
192.168.1.46:22 I1013 17:22:10.681534 1954 join.go:529] [preflight] Discovering cluster-info
192.168.1.46:22 I1013 17:22:10.681578 1954 token.go:80] [discovery] Created cluster-info discovery client, requesting info from "10.103.97.2:6443"
192.168.1.46:22 I1013 17:22:10.690072 1954 round_trippers.go:553] GET https://10.103.97.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s 200 OK in 7 milliseconds
192.168.1.46:22 I1013 17:22:10.690481 1954 token.go:223] [discovery] The cluster-info ConfigMap does not yet contain a JWS signature for token ID "jlagbc", will try again
192.168.1.46:22 I1013 17:22:15.775241 1954 round_trippers.go:553] GET https://10.103.97.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s 200 OK in 3 milliseconds
192.168.1.46:22 I1013 17:22:15.775595 1954 token.go:223] [discovery] The cluster-info ConfigMap does not yet contain a JWS signature for token ID "jlagbc", will try again
192.168.1.46:22 I1013 17:22:21.415228 1954 round_trippers.go:553] GET https://10.103.97.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s 200 OK in 3 milliseconds
192.168.1.46:22 I1013 17:22:21.416013 1954 token.go:118] [discovery] Requesting info from "10.103.97.2:6443" again to validate TLS against the pinned public key
192.168.1.46:22 I1013 17:22:21.423733 1954 round_trippers.go:553] GET https://10.103.97.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s 200 OK in 7 milliseconds
192.168.1.46:22 I1013 17:22:21.424129 1954 token.go:135] [discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "10.103.97.2:6443"
192.168.1.46:22 I1013 17:22:21.424172 1954 discovery.go:52] [discovery] Using provided TLSBootstrapToken as authentication credentials for the join process
192.168.1.46:22 I1013 17:22:21.424206 1954 join.go:543] [preflight] Fetching init configuration
192.168.1.46:22 I1013 17:22:21.424231 1954 join.go:589] [preflight] Retrieving KubeConfig objects
192.168.1.46:22 [preflight] Reading configuration from the cluster...
192.168.1.46:22 [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
192.168.1.46:22 I1013 17:22:21.431787 1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config?timeout=10s 200 OK in 7 milliseconds
192.168.1.46:22 I1013 17:22:21.435262 1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps/kube-proxy?timeout=10s 200 OK in 1 milliseconds
192.168.1.46:22 I1013 17:22:21.437782 1954 kubelet.go:74] attempting to download the KubeletConfiguration from ConfigMap "kubelet-config"
192.168.1.46:22 I1013 17:22:21.440548 1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps/kubelet-config?timeout=10s 200 OK in 2 milliseconds
192.168.1.46:22 I1013 17:22:21.443472 1954 interface.go:432] Looking for default routes with IPv4 addresses
192.168.1.46:22 I1013 17:22:21.443505 1954 interface.go:437] Default route transits interface "ens33"
192.168.1.46:22 I1013 17:22:21.443782 1954 interface.go:209] Interface ens33 is up
192.168.1.46:22 I1013 17:22:21.443952 1954 interface.go:257] Interface "ens33" has 2 addresses :[192.168.1.46/24 fe80::8773:c4f5:cbc8:7f56/64].
192.168.1.46:22 I1013 17:22:21.443976 1954 interface.go:224] Checking addr 192.168.1.46/24.
192.168.1.46:22 I1013 17:22:21.443987 1954 interface.go:231] IP found 192.168.1.46
192.168.1.46:22 I1013 17:22:21.444018 1954 interface.go:263] Found valid IPv4 address 192.168.1.46 for interface "ens33".
192.168.1.46:22 I1013 17:22:21.444047 1954 interface.go:443] Found active IP 192.168.1.46
192.168.1.46:22 W1013 17:22:21.444069 1954 utils.go:69] The recommended value for "healthzBindAddress" in "KubeletConfiguration" is: 127.0.0.1; the provided value is: 0.0.0.0
192.168.1.46:22 I1013 17:22:21.448455 1954 preflight.go:104] [preflight] Running configuration dependant checks
192.168.1.46:22 I1013 17:22:21.448492 1954 controlplaneprepare.go:225] [download-certs] Skipping certs download
192.168.1.46:22 I1013 17:22:21.448510 1954 kubelet.go:121] [kubelet-start] writing bootstrap kubelet config file at /etc/kubernetes/bootstrap-kubelet.conf
192.168.1.46:22 I1013 17:22:21.449164 1954 kubelet.go:136] [kubelet-start] writing CA certificate at /etc/kubernetes/pki/ca.crt
192.168.1.46:22 I1013 17:22:21.449596 1954 loader.go:373] Config loaded from file: /etc/kubernetes/bootstrap-kubelet.conf
192.168.1.46:22 I1013 17:22:21.449887 1954 kubelet.go:157] [kubelet-start] Checking for an existing Node in the cluster with name "k8s-node1" and status "Ready"
192.168.1.46:22 I1013 17:22:21.452477 1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-node1?timeout=10s 404 Not Found in 2 milliseconds
192.168.1.46:22 I1013 17:22:21.452914 1954 kubelet.go:172] [kubelet-start] Stopping the kubelet
192.168.1.46:22 [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
192.168.1.46:22 [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
192.168.1.46:22 [kubelet-start] Starting the kubelet
192.168.1.46:22 [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
192.168.1.46:22 I1013 17:22:22.580102 1954 loader.go:373] Config loaded from file: /etc/kubernetes/kubelet.conf
192.168.1.46:22 I1013 17:22:22.581326 1954 loader.go:373] Config loaded from file: /etc/kubernetes/kubelet.conf
192.168.1.46:22 I1013 17:22:22.581486 1954 cert_rotation.go:137] Starting client certificate rotation controller
192.168.1.46:22 I1013 17:22:22.581771 1954 kubelet.go:220] [kubelet-start] preserving the crisocket information for the node
192.168.1.46:22 I1013 17:22:22.581808 1954 patchnode.go:31] [patchnode] Uploading the CRI Socket information "unix:///run/containerd/containerd.sock" to the Node API object "k8s-node1" as an annotation
192.168.1.46:22 I1013 17:22:23.091731 1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-node1?timeout=10s 404 Not Found in 8 milliseconds
192.168.1.46:22 I1013 17:22:23.585294 1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-node1?timeout=10s 404 Not Found in 2 milliseconds
192.168.1.46:22 I1013 17:22:24.084994 1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-node1?timeout=10s 200 OK in 2 milliseconds
192.168.1.46:22 I1013 17:22:24.090869 1954 round_trippers.go:553] PATCH https://apiserver.cluster.local:6443/api/v1/nodes/k8s-node1?timeout=10s 200 OK in 4 milliseconds
192.168.1.46:22
192.168.1.46:22 This node has joined the cluster:
192.168.1.46:22 * Certificate signing request was sent to apiserver and a response was received.
192.168.1.46:22 * The Kubelet was informed of the new secure connection details.
192.168.1.46:22
192.168.1.46:22 Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
192.168.1.46:22
2023-10-13T17:22:24 info succeeded in joining 192.168.1.46:22 as worker
2023-10-13T17:22:24 debug remote copy files src /root/.sealos/default/etc/admin.conf to dst .kube/config
2023-10-13T17:22:24 info start to sync lvscare static pod to node: 192.168.1.46:22 master: [192.168.1.41:6443]
2023-10-13T17:22:24 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:24 debug get vip is 10.103.97.2
2023-10-13T17:22:24 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl static-pod lvscare --name kube-sealos-lvscare --vip 10.103.97.2:6443 --image ghcr.io/labring/lvscare:v4.3.5 --masters 192.168.1.41:6443` on 192.168.1.46:22
192.168.1.46:22 2023-10-13T17:22:24 info generator lvscare static pod is success
2023-10-13T17:22:24 info Executing pipeline RunGuest in CreateProcessor.
2023-10-13T17:22:24 debug start to exec `` on 192.168.1.46:22
2023-10-13T17:22:24 debug start to run command `` via exec
2023-10-13T17:22:24 debug start to run command `cd /var/lib/sealos/data/default/applications/default-mamxdvth/workdir && cp -f opt/helm /usr/bin/helm` via exec
2023-10-13T17:22:24 debug start to run command `cd /var/lib/sealos/data/default/applications/default-xq4qhf1k/workdir && helm upgrade -i calico charts/calico -f charts/calico.values.yaml -n tigera-operator --create-namespace` via exec
Release "calico" does not exist. Installing it now.
NAME: calico
LAST DEPLOYED: Fri Oct 13 17:22:27 2023
NAMESPACE: tigera-operator
STATUS: deployed
REVISION: 1
TEST SUITE: None
2023-10-13T17:22:29 info succeeded in creating a new cluster, enjoy it!
2023-10-13T17:22:29 debug save objects into local: /root/.sealos/default/Clusterfile, objects: [apiVersion: apps.sealos.io/v1beta1
kind: Cluster
metadata:
creationTimestamp: "2023-10-13T09:22:29Z"
name: default
spec:
hosts:
- ips:
- 192.168.1.41:22
roles:
- master
- amd64
- ips:
- 192.168.1.46:22
roles:
- node
- amd64
image:
- labring/kubernetes:v1.27.6
- labring/helm:v3.12.3
- labring/calico:v3.26.1
ssh:
passwd: kgb007
status:
commandCondition:
- images:
- labring/kubernetes:v1.27.6
- labring/helm:v3.12.3
- labring/calico:v3.26.1
lastHeartbeatTime: "2023-10-13T09:22:29Z"
message: Applied to cluster successfully
reason: Apply Command
status: "True"
type: ApplyCommandSuccess
conditions:
- lastHeartbeatTime: "2023-10-13T09:22:29Z"
message: Applied to cluster successfully
reason: Ready
status: "True"
type: ApplyClusterSuccess
mounts:
- env:
SEALOS_SYS_CRI_ENDPOINT: /var/run/containerd/containerd.sock
SEALOS_SYS_IMAGE_ENDPOINT: /var/run/image-cri-shim.sock
criData: /var/lib/containerd
defaultVIP: 10.103.97.2
disableApparmor: "false"
registryConfig: /etc/registry
registryData: /var/lib/registry
registryDomain: sealos.hub
registryPassword: passw0rd
registryPort: "5000"
registryUsername: admin
sandboxImage: pause:3.9
imageName: labring/kubernetes:v1.27.6
labels:
check: check.sh $registryData
clean: clean.sh && bash clean-cri.sh $criData
clean-registry: clean-registry.sh $registryData $registryConfig
image: ghcr.io/labring/lvscare:v4.3.5
init: init-cri.sh $registryDomain $registryPort && bash init.sh
init-registry: init-registry.sh $registryData $registryConfig
io.buildah.version: 1.30.0
org.opencontainers.image.description: kubernetes-v1.27.6 container image
org.opencontainers.image.licenses: MIT
org.opencontainers.image.source: https://github.com/labring-actions/cache
sealos.io.type: rootfs
sealos.io.version: v1beta1
version: v1.27.6
vip: $defaultVIP
mountPoint: /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged
name: default-g23pfjqw
type: rootfs
- cmd:
- cp -f opt/helm /usr/bin/helm
imageName: labring/helm:v3.12.3
labels:
io.buildah.version: 1.30.0
mountPoint: /var/lib/containers/storage/overlay/e2329b1df7f9a5888a4c01365cb8e7ddbb8f08d645dbb3faca11d9d568fbeb1b/merged
name: default-mamxdvth
type: application
- cmd:
- helm upgrade -i calico charts/calico -f charts/calico.values.yaml -n tigera-operator
--create-namespace
imageName: labring/calico:v3.26.1
labels:
io.buildah.version: 1.30.0
mountPoint: /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged
name: default-xq4qhf1k
type: application
phase: ClusterSuccess
]
2023-10-13T17:22:29 info
___ ___ ___ ___ ___ ___
/\ \ /\ \ /\ \ /\__\ /\ \ /\ \
/::\ \ /::\ \ /::\ \ /:/ / /::\ \ /::\ \
/:/\ \ \ /:/\:\ \ /:/\:\ \ /:/ / /:/\:\ \ /:/\ \ \
_\:\~\ \ \ /::\~\:\ \ /::\~\:\ \ /:/ / /:/ \:\ \ _\:\~\ \ \
/\ \:\ \ \__\ /:/\:\ \:\__\ /:/\:\ \:\__\ /:/__/ /:/__/ \:\__\ /\ \:\ \ \__\
\:\ \:\ \/__/ \:\~\:\ \/__/ \/__\:\/:/ / \:\ \ \:\ \ /:/ / \:\ \:\ \/__/
\:\ \:\__\ \:\ \:\__\ \::/ / \:\ \ \:\ /:/ / \:\ \:\__\
\:\/:/ / \:\ \/__/ /:/ / \:\ \ \:\/:/ / \:\/:/ /
\::/ / \:\__\ /:/ / \:\__\ \::/ / \::/ /
\/__/ \/__/ \/__/ \/__/ \/__/ \/__/
Website: https://www.sealos.io/
Address: github.com/labring/sealos
Version: 4.3.5-881c10cb
看到这个图标,恭喜你成功了!
kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master1 Ready control-plane 31m v1.27.6
k8s-node1 Ready <none> 30m v1.27.6
目前只使用了 1 台 master 和 1 台 node 节点,下一篇《sealos 4.3.5 安装手册(二)追加节点》将介绍如何向集群中添加其余的 master 和 node 节点。
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。