当前位置:   article > 正文

sealos4.3.5安装手册(一)部署集群_sealos安装和部署

sealos安装和部署

学习使用sealos部署k8s,发现目前大部分都是sealos3的文章,中间有几个小坑,记录一下最终成功的操作步骤。

前置准备
5台虚拟机

hostname      ip             类型
k8s-master1   192.168.1.41   主节点
k8s-master2   192.168.1.42   主节点
k8s-master3   192.168.1.43   主节点
k8s-node1     192.168.1.46   工作节点
k8s-node2     192.168.1.47   工作节点

1. 升级centos 内核

查看升级的镜像资源
http://elrepo.org/tiki/Download

# ELRepo release package from the Tsinghua (TUNA) mirror — registers the
# elrepo-kernel yum repository on CentOS 7
rpm -Uvh https://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/elrepo-release-7.0-6.el7.elrepo.noarch.rpm
# Install the long-term-support kernel (kernel-lt) from that repository
yum --enablerepo=elrepo-kernel install -y kernel-lt
# List the kernels GRUB2 knows about; each initrd16 line is one boot entry
grep initrd16 /boot/grub2/grub.cfg
# Boot menu entry 0 (the newly installed kernel is added first) by default
grub2-set-default 0

  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8

2. 关闭selinux

# Switch SELinux to permissive for the current session (no reboot required)
setenforce 0
# Persist across reboots: change SELINUX=enforcing to SELINUX=disabled
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
  • 1
  • 2
  • 3

3. 设置主机名

每台主机独立运行

# Replace xxx with this host's name (e.g. k8s-master1); run on each host
hostnamectl set-hostname xxx
  • 1
  • 2

所有主机运行

# Overwrite /etc/hosts on every node so all cluster members resolve each
# other by name. The quoted 'EOF' delimiter suppresses shell expansion in
# the body; <<- strips leading tabs only (these lines use none).
cat > /etc/hosts <<-'EOF'
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.41 k8s-master1
192.168.1.42 k8s-master2
192.168.1.43 k8s-master3
192.168.1.46 k8s-node1
192.168.1.47 k8s-node2
EOF
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9

4. 同步服务器时间

  1. 时间服务器配置(1台)
# --- Time server setup (run on ONE node, here 192.168.1.41) ---
yum install chrony -y
# Manual alternative: edit /etc/chrony.conf and change three items:
#   server 127.127.1.0 iburst
#   allow 192.168.1.0/24
#   local stratum 10

# Scripted edit:
# Comment out the stock upstream "server ..." lines. Anchoring with ^ makes
# the edit idempotent — a second run will not touch lines that are already
# comments (the original unanchored 's/server /# server/g' would re-comment
# the local server line inserted below). '&' keeps the matched text intact.
sed -i 's/^server /#&/' /etc/chrony.conf
# Serve the local clock as the time reference; inserted after line 6, where
# the stock server block sits — adjust if your chrony.conf layout differs
sed -i '6a\server 127.127.1.0 iburst' /etc/chrony.conf
# Allow clients from the cluster subnet (uncomments the shipped
# "#allow 192.168.0.0/24" line and rewrites the network)
sed -i 's/#allow 192.168.0.0/allow 192.168.1.0/g' /etc/chrony.conf
# Uncomment "local stratum 10" so this host serves time even when offline
sed -i 's/#local /local /g' /etc/chrony.conf

systemctl restart chronyd
systemctl enable chronyd --now

# Verify the configured sources
chronyc sources

# 显示如下
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^? 127.127.1.0                   0   7     0     -     +0ns[   +0ns] +/-    0ns

  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  1. 其他需要同步时间配置(除了时间服务器,其他所有)
# --- Time client setup (run on every node EXCEPT the time server) ---
yum install chrony -y
# Manual alternative: edit /etc/chrony.conf and point it at the time server:
#   server 192.168.1.41 iburst

# Scripted edit:
# Comment out the stock upstream "server ..." lines. Anchoring with ^ makes
# the edit idempotent — a second run will not touch lines that are already
# comments (the original unanchored 's/server /# server/g' would re-comment
# the server line inserted below). '&' keeps the matched text intact.
sed -i 's/^server /#&/' /etc/chrony.conf
# Sync from the cluster's own time server instead of public pools
sed -i '6a\server 192.168.1.41 iburst' /etc/chrony.conf

systemctl restart chronyd
systemctl enable chronyd --now

# Verify the configured sources
chronyc sources

# 显示如下
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^? k8s-master1                   0   6     0     -     +0ns[   +0ns] +/-    0ns
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24

5. 关闭swap交换分区

# Force an immediate clock step via chrony. NOTE(review): this command
# logically belongs to the time-sync step in section 4, not to swap — kept
# here to preserve the original procedure.
chronyc -a makestep

# Turn swap off for the running system
swapoff -a
# Comment out swap entries in /etc/fstab so swap stays off after reboot.
# Only lines that are not already comments are touched, so re-running is
# safe (the original '.*swap.*' pattern would stack '#' on commented lines).
sed -ri '/^[^#].*swap/s/^/#/' /etc/fstab
  • 1
  • 2
  • 3
  • 4

6. 日志

设置 rsyslogd 和 systemd journald

# Configure rsyslogd/systemd-journald: persist logs to disk and cap usage
mkdir -p /var/log/journal # presence of this dir enables persistent storage
# -p keeps the command idempotent — plain mkdir fails if the drop-in
# directory already exists (e.g. on a re-run of this guide)
mkdir -p /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# 持久化保存到磁盘
Storage=persistent

# 压缩历史日志
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# 最大占用空间 10G
SystemMaxUse=10G

# 单日志文件最大 200M
SystemMaxFileSize=200M

# 日志保存时间 2 周
MaxRetentionSec=2week

# 不将日志转发到 syslog
ForwardToSyslog=no
EOF

# Apply the new journald configuration
systemctl restart systemd-journald
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28

7. 重启

# Reboot so the new kernel, SELinux, and swap settings all take effect
reboot
  • 1

8. kube-proxy开启ipvs的前置条件

# Load br_netfilter now so bridged traffic traverses iptables/ipvs rules
modprobe br_netfilter

# Scripts under /etc/sysconfig/modules/ are executed at boot on RHEL/CentOS,
# making these modules persistent. br_netfilter is included in the script as
# well — the original only loaded it manually, so it would be missing after
# a reboot.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- br_netfilter
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

# Make the script executable, load everything now, and confirm it is present
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12

9. 获取sealos

sealos官方文档

sealos GitHub

获取最新的 releases 版本

将包解压,只需要解压后的sealos文件

# Extract the release tarball; only the 'sealos' binary inside is needed
tar -zxvf sealos_4.3.5_linux_amd64.tar.gz 
# Put the binary on PATH so it can be invoked as 'sealos'
cp sealos /usr/bin/
  • 1
  • 2

10. k8s一键安装

# --debug prints verbose step-by-step output, useful for learning and
# troubleshooting. --masters/--nodes take comma-separated node IPs;
# --passwd is the root SSH password sealos uses to log in to every node.
sealos run labring/kubernetes:v1.27.6 labring/helm:v3.12.3 labring/calico:v3.26.1 --masters 192.168.1.41 --nodes 192.168.1.46 --passwd 'kgb007' --debug
  • 1
  • 2

安装失败,清空集群,重新安装

# Tear down the whole cluster without a confirmation prompt — use this after
# a failed install before retrying
sealos reset --force=true
  • 1
  • 2

执行日志

[root@k8s-master1 sealos]# sealos run labring/kubernetes:v1.27.6 labring/helm:v3.12.3 labring/calico:v3.26.1 --masters 192.168.1.41 --nodes 192.168.1.46 --passwd 'kgb007' --debug
2023-10-13T17:19:21 debug create new buildah config /etc/containers/policy.json cause it's not exist
2023-10-13T17:19:21 debug create new buildah config /etc/containers/registries.conf cause it's not exist
2023-10-13T17:19:21 debug using file /etc/containers/storage.conf as container storage config
2023-10-13T17:19:21 debug create new buildah config /etc/containers/storage.conf cause it's not exist
2023-10-13T17:19:21 debug creating new cluster
2023-10-13T17:19:21 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:19:21 debug defaultPort: 22
2023-10-13T17:19:21 debug start to exec `arch` on 192.168.1.46:22
2023-10-13T17:19:21 debug defaultPort: 22
2023-10-13T17:19:21 debug cluster info: apiVersion: apps.sealos.io/v1beta1
kind: Cluster
metadata:
  creationTimestamp: null
  name: default
spec:
  hosts:
  - ips:
    - 192.168.1.41:22
    roles:
    - master
    - amd64
  - ips:
    - 192.168.1.46:22
    roles:
    - node
    - amd64
  image:
  - labring/kubernetes:v1.27.6
  - labring/helm:v3.12.3
  - labring/calico:v3.26.1
  ssh:
    passwd: kgb007
status: {}

2023-10-13T17:19:21 info Start to create a new cluster: master [192.168.1.41], worker [192.168.1.46], registry 192.168.1.41
2023-10-13T17:19:21 info Executing pipeline Check in CreateProcessor.
2023-10-13T17:19:21 info checker:hostname [192.168.1.41:22 192.168.1.46:22]
2023-10-13T17:19:21 debug start to exec remote 192.168.1.41:22 shell: hostname
2023-10-13T17:19:21 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:19:21 debug start to exec remote 192.168.1.46:22 shell: hostname
2023-10-13T17:19:21 debug start to exec `hostname` on 192.168.1.46:22
2023-10-13T17:19:21 info checker:timeSync [192.168.1.41:22 192.168.1.46:22]
2023-10-13T17:19:21 debug start to exec remote 192.168.1.41:22 shell: date +%s
2023-10-13T17:19:21 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:19:21 debug start to exec remote 192.168.1.46:22 shell: date +%s
2023-10-13T17:19:21 debug start to exec `date +%s` on 192.168.1.46:22
2023-10-13T17:19:22 info Executing pipeline PreProcess in CreateProcessor.
2023-10-13T17:19:22 debug cannot find image in local storage, trying to inspect from remote
2023-10-13T17:19:22 debug parse reference //labring/kubernetes:v1.27.6 with transport docker
Resolving "labring/kubernetes" using unqualified-search registries (/etc/containers/registries.conf)
Trying to pull docker.io/labring/kubernetes:v1.27.6...
Getting image source signatures
Copying blob 6dafa313b3ad done  
Copying blob 3acc1385eb7c done  
Copying blob 147e29300f47 done  
Copying blob 3796a06c178b done  
Copying config 2c7407e39c done  
Writing manifest to image destination
Storing signatures
2023-10-13T17:19:54 debug images 2c7407e39c29a3c17b046a8d1635eb156cb8ab49df2e41e1f7063b3ff4ef4229 are pulled
2023-10-13T17:19:54 debug Pull Policy for pull [missing]
2023-10-13T17:19:54 debug parse reference 2c7407e39c29a3c17b046a8d1635eb156cb8ab49df2e41e1f7063b3ff4ef4229 with transport containers-storage
2023-10-13T17:19:54 debug cannot find image in local storage, trying to inspect from remote
2023-10-13T17:19:54 debug parse reference //labring/helm:v3.12.3 with transport docker
Resolving "labring/helm" using unqualified-search registries (/etc/containers/registries.conf)
Trying to pull docker.io/labring/helm:v3.12.3...
Getting image source signatures
Copying blob 4d7d76460a76 done  
Copying config 446441537f done  
Writing manifest to image destination
Storing signatures
2023-10-13T17:20:06 debug images 446441537f66d30e46d67a16c242e5b4de1b033524ee0c54370d09f5e0c4d3e0 are pulled
2023-10-13T17:20:06 debug Pull Policy for pull [missing]
2023-10-13T17:20:06 debug parse reference 446441537f66d30e46d67a16c242e5b4de1b033524ee0c54370d09f5e0c4d3e0 with transport containers-storage
2023-10-13T17:20:06 debug cannot find image in local storage, trying to inspect from remote
2023-10-13T17:20:06 debug parse reference //labring/calico:v3.26.1 with transport docker
Resolving "labring/calico" using unqualified-search registries (/etc/containers/registries.conf)
Trying to pull docker.io/labring/calico:v3.26.1...
Getting image source signatures
Copying blob 26a63b9f87d4 done  
Copying config 1e8350ad92 done  
Writing manifest to image destination
Storing signatures
2023-10-13T17:20:35 debug images 1e8350ad92f040e3b065783b8d42441f4c4673394d48d5219fd4d8c6aef00bfc are pulled
2023-10-13T17:20:35 debug Pull Policy for pull [missing]
2023-10-13T17:20:35 debug parse reference 1e8350ad92f040e3b065783b8d42441f4c4673394d48d5219fd4d8c6aef00bfc with transport containers-storage
2023-10-13T17:20:35 info Executing pipeline RunConfig in CreateProcessor.
2023-10-13T17:20:35 debug clusterfile config is empty!
2023-10-13T17:20:35 debug clusterfile config is empty!
2023-10-13T17:20:35 debug clusterfile config is empty!
2023-10-13T17:20:35 info Executing pipeline MountRootfs in CreateProcessor.
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/etc
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/etc
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/e2329b1df7f9a5888a4c01365cb8e7ddbb8f08d645dbb3faca11d9d568fbeb1b/merged/etc
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/e2329b1df7f9a5888a4c01365cb8e7ddbb8f08d645dbb3faca11d9d568fbeb1b/merged/scripts
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/e2329b1df7f9a5888a4c01365cb8e7ddbb8f08d645dbb3faca11d9d568fbeb1b/merged/manifests
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/scripts
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/manifests
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/scripts
2023-10-13T17:20:35 debug render env dir: /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/manifests
2023-10-13T17:20:46 debug send mount image, target: 192.168.1.46:22, image: labring/kubernetes:v1.27.6, type: rootfs
2023-10-13T17:20:46 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/Kubefile to dst /var/lib/sealos/data/default/rootfs/Kubefile
2023-10-13T17:20:46 debug send mount image, target: 192.168.1.41:22, image: labring/kubernetes:v1.27.6, type: rootfs
2023-10-13T17:20:46 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/Kubefile to dst /var/lib/sealos/data/default/rootfs/Kubefile
2023-10-13T17:20:46 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/README.md to dst /var/lib/sealos/data/default/rootfs/README.md
2023-10-13T17:20:46 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/bin to dst /var/lib/sealos/data/default/rootfs/bin
2023-10-13T17:20:46 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/README.md to dst /var/lib/sealos/data/default/rootfs/README.md
2023-10-13T17:20:46 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/bin to dst /var/lib/sealos/data/default/rootfs/bin
[1/1]copying files to 192.168.1.46:22  20% [==>            ] (1/5, 103 it/s) [0s:0s]2023-10-13T17:20:48 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/cri to dst /var/lib/sealos/data/default/rootfs/cri
2023-10-13T17:20:49 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/etc to dst /var/lib/sealos/data/default/rootfs/etc
2023-10-13T17:20:49 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/images to dst /var/lib/sealos/data/default/rootfs/images
2023-10-13T17:20:49 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/opt to dst /var/lib/sealos/data/default/rootfs/opt
2023-10-13T17:20:50 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/scripts to dst /var/lib/sealos/data/default/rootfs/scripts
2023-10-13T17:20:50 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/statics to dst /var/lib/sealos/data/default/rootfs/statics
2023-10-13T17:21:07 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/cri to dst /var/lib/sealos/data/default/rootfs/cri
2023-10-13T17:21:13 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/etc to dst /var/lib/sealos/data/default/rootfs/etc
2023-10-13T17:21:13 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/images to dst /var/lib/sealos/data/default/rootfs/images
2023-10-13T17:21:13 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/opt to dst /var/lib/sealos/data/default/rootfs/opt
2023-10-13T17:21:18 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/scripts to dst /var/lib/sealos/data/default/rootfs/scripts
2023-10-13T17:21:18 debug remote copy files src /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged/statics to dst /var/lib/sealos/data/default/rootfs/statics
2023-10-13T17:21:18 debug send mount image, target: 192.168.1.41:22, image: labring/calico:v3.26.1, type: application
2023-10-13T17:21:18 debug send mount image, target: 192.168.1.41:22, image: labring/helm:v3.12.3, type: application
2023-10-13T17:21:18 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/Kubefile to dst /var/lib/sealos/data/default/applications/default-xq4qhf1k/workdir/Kubefile
2023-10-13T17:21:18 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/e2329b1df7f9a5888a4c01365cb8e7ddbb8f08d645dbb3faca11d9d568fbeb1b/merged/opt to dst /var/lib/sealos/data/default/applications/default-mamxdvth/workdir/opt
2023-10-13T17:21:18 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/charts to dst /var/lib/sealos/data/default/applications/default-xq4qhf1k/workdir/charts
2023-10-13T17:21:18 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/images to dst /var/lib/sealos/data/default/applications/default-xq4qhf1k/workdir/images
2023-10-13T17:21:18 debug local 192.168.1.41:22 copy files src /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged/init.sh to dst /var/lib/sealos/data/default/applications/default-xq4qhf1k/workdir/init.sh
2023-10-13T17:21:18 info Executing pipeline MirrorRegistry in CreateProcessor.
2023-10-13T17:21:18 debug registry nodes is: [192.168.1.41:22]
2023-10-13T17:21:18 info trying default http mode to sync images to hosts [192.168.1.41:22]
2023-10-13T17:21:18 debug checking if endpoint http://192.168.1.41:5050 is alive
2023-10-13T17:21:18 debug running temporary registry on host 192.168.1.41:22
2023-10-13T17:21:18 debug start to run command `/var/lib/sealos/data/default/rootfs/opt/sealctl registry serve filesystem -p 5050 --disable-logging=true /var/lib/sealos/data/default/rootfs/registry` via exec
2023-10-13T17:21:18 debug http endpoint http://192.168.1.41:5050 is alive
2023-10-13T17:21:18 debug checking if endpoint http://127.0.0.1:37493 is alive
2023-10-13T17:21:18 debug checking if endpoint http://127.0.0.1:32835 is alive
2023-10-13T17:21:18 debug http endpoint http://127.0.0.1:37493 is alive
2023-10-13T17:21:18 debug http endpoint http://127.0.0.1:32835 is alive
2023-10-13T17:21:18 debug syncing repos [{coredns/coredns  0 false false false} {etcd  0 false false false} {kube-apiserver  0 false false false} {kube-controller-manager  0 false false false} {kube-proxy  0 false false false} {kube-scheduler  0 false false false} {labring/lvscare  0 false false false} {pause  0 false false false}] from 127.0.0.1:37493 to 192.168.1.41:5050
2023-10-13T17:21:18 debug syncing repos [{calico/apiserver  0 false false false} {calico/cni  0 false false false} {calico/csi  0 false false false} {calico/kube-controllers  0 false false false} {calico/node  0 false false false} {calico/node-driver-registrar  0 false false false} {calico/pod2daemon-flexvol  0 false false false} {calico/typha  0 false false false} {tigera/key-cert-provisioner  0 false false false} {tigera/operator  0 false false false}] from 127.0.0.1:32835 to 192.168.1.41:5050
2023-10-13T17:21:18 debug syncing 192.168.1.41:5050/calico/apiserver:v3.26.1 with selection 1
2023-10-13T17:21:18 debug syncing 192.168.1.41:5050/coredns/coredns:v1.10.1 with selection 1
2023-10-13T17:21:20 debug syncing 192.168.1.41:5050/coredns/coredns:v1.10.1 with selection 0
2023-10-13T17:21:20 debug syncing 192.168.1.41:5050/etcd:3.5.7-0 with selection 1
2023-10-13T17:21:22 debug syncing 192.168.1.41:5050/calico/apiserver:v3.26.1 with selection 0
2023-10-13T17:21:22 debug syncing 192.168.1.41:5050/calico/cni:v3.26.1 with selection 1
2023-10-13T17:21:24 debug syncing 192.168.1.41:5050/etcd:3.5.7-0 with selection 0
2023-10-13T17:21:24 debug syncing 192.168.1.41:5050/kube-apiserver:v1.27.6 with selection 1
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/calico/cni:v3.26.1 with selection 0
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/calico/csi:v3.26.1 with selection 1
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/kube-apiserver:v1.27.6 with selection 0
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/kube-controller-manager:v1.27.6 with selection 1
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/calico/csi:v3.26.1 with selection 0
2023-10-13T17:21:26 debug syncing 192.168.1.41:5050/calico/kube-controllers:v3.26.1 with selection 1
2023-10-13T17:21:27 debug syncing 192.168.1.41:5050/kube-controller-manager:v1.27.6 with selection 0
2023-10-13T17:21:27 debug syncing 192.168.1.41:5050/kube-proxy:v1.27.6 with selection 1
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/calico/kube-controllers:v3.26.1 with selection 0
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/calico/node:v3.26.1 with selection 1
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/kube-proxy:v1.27.6 with selection 0
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/kube-scheduler:v1.27.6 with selection 1
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/kube-scheduler:v1.27.6 with selection 0
2023-10-13T17:21:28 debug syncing 192.168.1.41:5050/labring/lvscare:v4.3.5 with selection 1
2023-10-13T17:21:29 debug syncing 192.168.1.41:5050/labring/lvscare:v4.3.5 with selection 0
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/pause:3.9 with selection 1
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/pause:3.9 with selection 0
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/calico/node:v3.26.1 with selection 0
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/calico/node-driver-registrar:v3.26.1 with selection 1
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/calico/node-driver-registrar:v3.26.1 with selection 0
2023-10-13T17:21:30 debug syncing 192.168.1.41:5050/calico/pod2daemon-flexvol:v3.26.1 with selection 1
2023-10-13T17:21:31 debug syncing 192.168.1.41:5050/calico/pod2daemon-flexvol:v3.26.1 with selection 0
2023-10-13T17:21:31 debug syncing 192.168.1.41:5050/calico/typha:v3.26.1 with selection 1
2023-10-13T17:21:32 debug syncing 192.168.1.41:5050/calico/typha:v3.26.1 with selection 0
2023-10-13T17:21:32 debug syncing 192.168.1.41:5050/tigera/key-cert-provisioner:v1.1.9 with selection 1
2023-10-13T17:21:32 debug syncing 192.168.1.41:5050/tigera/key-cert-provisioner:v1.1.9 with selection 0
2023-10-13T17:21:32 debug syncing 192.168.1.41:5050/tigera/operator:v1.30.4 with selection 1
2023-10-13T17:21:32 debug syncing 192.168.1.41:5050/tigera/operator:v1.30.4 with selection 0
2023-10-13T17:21:32 info Executing pipeline Bootstrap in CreateProcessor
2023-10-13T17:21:32 debug apply [default_checker registry_host_applier registry_applier initializer] on hosts [192.168.1.41:22 192.168.1.46:22]
2023-10-13T17:21:32 debug apply default_checker on host 192.168.1.46:22
2023-10-13T17:21:32 debug apply default_checker on host 192.168.1.41:22
2023-10-13T17:21:32 debug start to exec `cd /var/lib/sealos/data/default/rootfs/scripts && export registryPassword="passw0rd" registryUsername="admin" registryData="/var/lib/registry" SEALOS_SYS_KUBE_VERSION="v1.27.6" defaultVIP="10.103.97.2" registryDomain="sealos.hub" criData="/var/lib/containerd" SEALOS_SYS_CRI_ENDPOINT="/var/run/containerd/containerd.sock" SEALOS_SYS_SEALOS_VERSION="4.3.5" sandboxImage="pause:3.9" registryPort="5000" disableApparmor="false" SEALOS_SYS_IMAGE_ENDPOINT="/var/run/image-cri-shim.sock" registryConfig="/etc/registry" ; bash check.sh $registryData` on 192.168.1.46:22
2023-10-13T17:21:32 debug start to run command `cd /var/lib/sealos/data/default/rootfs/scripts && export registryPort="5000" registryUsername="admin" disableApparmor="false" SEALOS_SYS_SEALOS_VERSION="4.3.5" defaultVIP="10.103.97.2" registryDomain="sealos.hub" criData="/var/lib/containerd" registryPassword="passw0rd" sandboxImage="pause:3.9" registryConfig="/etc/registry" SEALOS_SYS_CRI_ENDPOINT="/var/run/containerd/containerd.sock" SEALOS_SYS_IMAGE_ENDPOINT="/var/run/image-cri-shim.sock" SEALOS_SYS_KUBE_VERSION="v1.27.6" registryData="/var/lib/registry" ; bash check.sh $registryData` via exec
 INFO [2023-10-13 17:21:32] >> Check port kubelet port 10249..10259, reserved port 5050..5054 inuse. Please wait... 
192.168.1.46:22	 INFO [2023-10-13 17:21:32] >> Check port kubelet port 10249..10259, reserved port 5050..5054 inuse. Please wait... 
which: no docker in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin)
 WARN [2023-10-13 17:21:33] >> Replace disable_apparmor = false to disable_apparmor = true 
 INFO [2023-10-13 17:21:33] >> check root,port,cri success 
192.168.1.46:22	which: no docker in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin)
192.168.1.46:22	 WARN [2023-10-13 17:21:33] >> Replace disable_apparmor = false to disable_apparmor = true 
192.168.1.46:22	 INFO [2023-10-13 17:21:33] >> check root,port,cri success 
2023-10-13T17:21:33 debug apply registry_host_applier on host 192.168.1.46:22
2023-10-13T17:21:33 debug apply registry_host_applier on host 192.168.1.41:22
2023-10-13T17:21:33 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:33 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:33 debug registry config data info: # Copyright © 2022 sealos.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

domain: sealos.hub
port: "5000"
username: "admin"
password: "passw0rd"
data: "/var/lib/registry"

2023-10-13T17:21:33 debug registry config data info: # Copyright © 2022 sealos.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

domain: sealos.hub
port: "5000"
username: "admin"
password: "passw0rd"
data: "/var/lib/registry"

2023-10-13T17:21:33 debug show registry info, IP: 192.168.1.41:22, Domain: sealos.hub, Data: /var/lib/registry
2023-10-13T17:21:33 debug show registry info, IP: 192.168.1.41:22, Domain: sealos.hub, Data: /var/lib/registry
2023-10-13T17:21:33 debug start to run command `/var/lib/sealos/data/default/rootfs/opt/sealctl  hosts add --ip 192.168.1.41  --domain sealos.hub` via exec
2023-10-13T17:21:33 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl  hosts add --ip 192.168.1.41  --domain sealos.hub` on 192.168.1.46:22
2023-10-13T17:21:33 info domain sealos.hub:192.168.1.41 append success
192.168.1.46:22	2023-10-13T17:21:33 info domain sealos.hub:192.168.1.41 append success
2023-10-13T17:21:34 debug apply registry_applier on host 192.168.1.41:22
2023-10-13T17:21:34 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:34 debug registry config data info: # Copyright © 2022 sealos.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

domain: sealos.hub
port: "5000"
username: "admin"
password: "passw0rd"
data: "/var/lib/registry"

2023-10-13T17:21:34 debug show registry info, IP: 192.168.1.41:22, Domain: sealos.hub, Data: /var/lib/registry
2023-10-13T17:21:34 debug make soft link: rm -rf /var/lib/registry && ln -s /var/lib/sealos/data/default/rootfs/registry /var/lib/registry
2023-10-13T17:21:34 debug start to run command `rm -rf /var/lib/registry && ln -s /var/lib/sealos/data/default/rootfs/registry /var/lib/registry` via exec
2023-10-13T17:21:34 debug local 192.168.1.41:22 copy files src /root/.sealos/default/etc/registry_htpasswd to dst /var/lib/sealos/data/default/rootfs/etc/registry_htpasswd
2023-10-13T17:21:34 debug start to run command `cd /var/lib/sealos/data/default/rootfs/scripts && export registryDomain="sealos.hub" disableApparmor="false" sandboxImage="pause:3.9" registryPort="5000" SEALOS_SYS_IMAGE_ENDPOINT="/var/run/image-cri-shim.sock" criData="/var/lib/containerd" SEALOS_SYS_CRI_ENDPOINT="/var/run/containerd/containerd.sock" registryConfig="/etc/registry" registryUsername="admin" defaultVIP="10.103.97.2" registryData="/var/lib/registry" SEALOS_SYS_KUBE_VERSION="v1.27.6" SEALOS_SYS_SEALOS_VERSION="4.3.5" registryPassword="passw0rd" ; bash init-registry.sh $registryData $registryConfig` via exec
Created symlink from /etc/systemd/system/multi-user.target.wants/registry.service to /etc/systemd/system/registry.service.
 INFO [2023-10-13 17:21:34] >> Health check registry! 
 INFO [2023-10-13 17:21:34] >> registry is running 
 INFO [2023-10-13 17:21:34] >> init registry success 
2023-10-13T17:21:34 debug apply initializer on host 192.168.1.46:22
2023-10-13T17:21:34 debug apply initializer on host 192.168.1.41:22
2023-10-13T17:21:34 debug start to run command `cd /var/lib/sealos/data/default/rootfs/scripts && export registryPassword="passw0rd" registryPort="5000" SEALOS_SYS_IMAGE_ENDPOINT="/var/run/image-cri-shim.sock" SEALOS_SYS_CRI_ENDPOINT="/var/run/containerd/containerd.sock" registryUsername="admin" SEALOS_SYS_SEALOS_VERSION="4.3.5" defaultVIP="10.103.97.2" registryDomain="sealos.hub" registryData="/var/lib/registry" criData="/var/lib/containerd" disableApparmor="false" sandboxImage="pause:3.9" registryConfig="/etc/registry" SEALOS_SYS_KUBE_VERSION="v1.27.6" ; bash init-cri.sh $registryDomain $registryPort && bash init.sh` via exec
2023-10-13T17:21:34 debug start to exec `cd /var/lib/sealos/data/default/rootfs/scripts && export registryPort="5000" registryUsername="admin" registryDomain="sealos.hub" registryPassword="passw0rd" SEALOS_SYS_IMAGE_ENDPOINT="/var/run/image-cri-shim.sock" registryData="/var/lib/registry" criData="/var/lib/containerd" disableApparmor="false" sandboxImage="pause:3.9" defaultVIP="10.103.97.2" SEALOS_SYS_CRI_ENDPOINT="/var/run/containerd/containerd.sock" SEALOS_SYS_KUBE_VERSION="v1.27.6" SEALOS_SYS_SEALOS_VERSION="4.3.5" registryConfig="/etc/registry" ; bash init-cri.sh $registryDomain $registryPort && bash init.sh` on 192.168.1.46:22
Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /etc/systemd/system/containerd.service.
192.168.1.46:22	Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /etc/systemd/system/containerd.service.
 INFO [2023-10-13 17:21:38] >> Health check containerd! 
 INFO [2023-10-13 17:21:38] >> containerd is running 
 INFO [2023-10-13 17:21:38] >> init containerd success 
Created symlink from /etc/systemd/system/multi-user.target.wants/image-cri-shim.service to /etc/systemd/system/image-cri-shim.service.
 INFO [2023-10-13 17:21:38] >> Health check image-cri-shim! 
 INFO [2023-10-13 17:21:38] >> image-cri-shim is running 
 INFO [2023-10-13 17:21:38] >> init shim success 
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.46:22	 INFO [2023-10-13 17:21:38] >> Health check containerd! 
192.168.1.46:22	 INFO [2023-10-13 17:21:38] >> containerd is running 
192.168.1.46:22	 INFO [2023-10-13 17:21:38] >> init containerd success 
192.168.1.46:22	Created symlink from /etc/systemd/system/multi-user.target.wants/image-cri-shim.service to /etc/systemd/system/image-cri-shim.service.
192.168.1.46:22	 INFO [2023-10-13 17:21:38] >> Health check image-cri-shim! 
192.168.1.46:22	 INFO [2023-10-13 17:21:38] >> image-cri-shim is running 
192.168.1.46:22	 INFO [2023-10-13 17:21:38] >> init shim success 
192.168.1.46:22	127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
192.168.1.46:22	::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
* Applying /usr/lib/sysctl.d/00-system.conf ...
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
fs.file-max = 1048576 # sealos
net.bridge.bridge-nf-call-ip6tables = 1 # sealos
net.bridge.bridge-nf-call-iptables = 1 # sealos
net.core.somaxconn = 65535 # sealos
net.ipv4.conf.all.rp_filter = 0 # sealos
net.ipv4.ip_forward = 1 # sealos
net.ipv4.ip_local_port_range = 1024 65535 # sealos
net.ipv4.tcp_keepalive_intvl = 30 # sealos
net.ipv4.tcp_keepalive_time = 600 # sealos
net.ipv4.vs.conn_reuse_mode = 0 # sealos
net.ipv4.vs.conntrack = 1 # sealos
net.ipv6.conf.all.forwarding = 1 # sealos
* Applying /etc/sysctl.conf ...
fs.file-max = 1048576 # sealos
net.bridge.bridge-nf-call-ip6tables = 1 # sealos
net.bridge.bridge-nf-call-iptables = 1 # sealos
net.core.somaxconn = 65535 # sealos
net.ipv4.conf.all.rp_filter = 0 # sealos
net.ipv4.ip_forward = 1 # sealos
net.ipv4.ip_local_port_range = 1024 65535 # sealos
net.ipv4.tcp_keepalive_intvl = 30 # sealos
net.ipv4.tcp_keepalive_time = 600 # sealos
net.ipv4.vs.conn_reuse_mode = 0 # sealos
net.ipv4.vs.conntrack = 1 # sealos
net.ipv6.conf.all.forwarding = 1 # sealos
 INFO [2023-10-13 17:21:40] >> pull pause image sealos.hub:5000/pause:3.9 
192.168.1.46:22	Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
192.168.1.46:22	Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
192.168.1.46:22	* Applying /usr/lib/sysctl.d/00-system.conf ...
192.168.1.46:22	net.bridge.bridge-nf-call-ip6tables = 0
192.168.1.46:22	net.bridge.bridge-nf-call-iptables = 0
192.168.1.46:22	net.bridge.bridge-nf-call-arptables = 0
192.168.1.46:22	* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
192.168.1.46:22	kernel.yama.ptrace_scope = 0
192.168.1.46:22	* Applying /usr/lib/sysctl.d/50-default.conf ...
192.168.1.46:22	kernel.sysrq = 16
192.168.1.46:22	kernel.core_uses_pid = 1
192.168.1.46:22	kernel.kptr_restrict = 1
192.168.1.46:22	net.ipv4.conf.default.rp_filter = 1
192.168.1.46:22	net.ipv4.conf.all.rp_filter = 1
192.168.1.46:22	net.ipv4.conf.default.accept_source_route = 0
192.168.1.46:22	net.ipv4.conf.all.accept_source_route = 0
192.168.1.46:22	net.ipv4.conf.default.promote_secondaries = 1
192.168.1.46:22	net.ipv4.conf.all.promote_secondaries = 1
192.168.1.46:22	fs.protected_hardlinks = 1
192.168.1.46:22	fs.protected_symlinks = 1
192.168.1.46:22	* Applying /etc/sysctl.d/99-sysctl.conf ...
192.168.1.46:22	fs.file-max = 1048576 # sealos
192.168.1.46:22	net.bridge.bridge-nf-call-ip6tables = 1 # sealos
192.168.1.46:22	net.bridge.bridge-nf-call-iptables = 1 # sealos
192.168.1.46:22	net.core.somaxconn = 65535 # sealos
192.168.1.46:22	net.ipv4.conf.all.rp_filter = 0 # sealos
192.168.1.46:22	net.ipv4.ip_forward = 1 # sealos
192.168.1.46:22	net.ipv4.ip_local_port_range = 1024 65535 # sealos
192.168.1.46:22	net.ipv4.tcp_keepalive_intvl = 30 # sealos
192.168.1.46:22	net.ipv4.tcp_keepalive_time = 600 # sealos
192.168.1.46:22	net.ipv4.vs.conn_reuse_mode = 0 # sealos
192.168.1.46:22	net.ipv4.vs.conntrack = 1 # sealos
192.168.1.46:22	net.ipv6.conf.all.forwarding = 1 # sealos
192.168.1.46:22	* Applying /etc/sysctl.conf ...
192.168.1.46:22	fs.file-max = 1048576 # sealos
192.168.1.46:22	net.bridge.bridge-nf-call-ip6tables = 1 # sealos
192.168.1.46:22	net.bridge.bridge-nf-call-iptables = 1 # sealos
192.168.1.46:22	net.core.somaxconn = 65535 # sealos
192.168.1.46:22	net.ipv4.conf.all.rp_filter = 0 # sealos
192.168.1.46:22	net.ipv4.ip_forward = 1 # sealos
192.168.1.46:22	net.ipv4.ip_local_port_range = 1024 65535 # sealos
192.168.1.46:22	net.ipv4.tcp_keepalive_intvl = 30 # sealos
192.168.1.46:22	net.ipv4.tcp_keepalive_time = 600 # sealos
192.168.1.46:22	net.ipv4.vs.conn_reuse_mode = 0 # sealos
192.168.1.46:22	net.ipv4.vs.conntrack = 1 # sealos
192.168.1.46:22	net.ipv6.conf.all.forwarding = 1 # sealos
Image is up to date for sha256:e6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
 INFO [2023-10-13 17:21:40] >> init kubelet success 
 INFO [2023-10-13 17:21:40] >> init rootfs success 
192.168.1.46:22	 INFO [2023-10-13 17:21:40] >> pull pause image sealos.hub:5000/pause:3.9 
192.168.1.46:22	Image is up to date for sha256:e6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c
192.168.1.46:22	Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
192.168.1.46:22	 INFO [2023-10-13 17:21:41] >> init kubelet success 
192.168.1.46:22	 INFO [2023-10-13 17:21:41] >> init rootfs success 
2023-10-13T17:21:41 info Executing pipeline Init in CreateProcessor.
2023-10-13T17:21:41 info start to copy kubeadm config to master0
2023-10-13T17:21:41 debug using default kubeadm config
2023-10-13T17:21:41 debug skip merging kubeadm configs from cause file /var/lib/sealos/data/default/rootfs/etc/kubeadm.yml not exists
2023-10-13T17:21:41 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:21:41 debug get vip is 10.103.97.2
2023-10-13T17:21:41 debug start to exec remote 192.168.1.41:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl cri socket
2023-10-13T17:21:41 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:41 debug get nodes [192.168.1.41:22] cri socket is [/run/containerd/containerd.sock]
2023-10-13T17:21:41 debug node: 192.168.1.41:22 , criSocket: /run/containerd/containerd.sock
2023-10-13T17:21:41 debug start to exec remote 192.168.1.41:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl cri cgroup-driver --short
2023-10-13T17:21:41 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:41 debug get nodes [192.168.1.41:22] cgroup driver is [systemd]
2023-10-13T17:21:41 debug node: 192.168.1.41:22 , cGroupDriver: systemd
2023-10-13T17:21:41 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:21:41 debug get vip is 10.103.97.2
2023-10-13T17:21:41 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:21:41 debug get vip is 10.103.97.2
2023-10-13T17:21:41 debug override defaults of kubelet configuration
2023-10-13T17:21:41 debug local 192.168.1.41:22 copy files src /root/.sealos/default/tmp/kubeadm-init.yaml to dst /root/.sealos/default/etc/kubeadm-init.yaml
2023-10-13T17:21:41 info start to generate cert and kubeConfig...
2023-10-13T17:21:41 debug start to run command `rm -rf /etc/kubernetes/admin.conf` via exec
2023-10-13T17:21:41 info start to generator cert and copy to masters...
2023-10-13T17:21:41 debug start to exec remote 192.168.1.41:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl hostname
2023-10-13T17:21:41 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:42 info apiserver altNames : {map[apiserver.cluster.local:apiserver.cluster.local k8s-master1:k8s-master1 kubernetes:kubernetes kubernetes.default:kubernetes.default kubernetes.default.svc:kubernetes.default.svc kubernetes.default.svc.cluster.local:kubernetes.default.svc.cluster.local localhost:localhost] map[10.103.97.2:10.103.97.2 10.96.0.1:10.96.0.1 127.0.0.1:127.0.0.1 192.168.1.41:192.168.1.41]}
2023-10-13T17:21:42 info Etcd altnames : {map[k8s-master1:k8s-master1 localhost:localhost] map[127.0.0.1:127.0.0.1 192.168.1.41:192.168.1.41 ::1:::1]}, commonName : k8s-master1
2023-10-13T17:21:44 debug cert.GenerateCert getServiceCIDR  10.96.0.0/22
2023-10-13T17:21:44 debug cert.GenerateCert param: /root/.sealos/default/pki /root/.sealos/default/pki/etcd [127.0.0.1 apiserver.cluster.local 10.103.97.2 192.168.1.41] 192.168.1.41 k8s-master1 10.96.0.0/22 cluster.local
2023-10-13T17:21:44 info start to copy etc pki files to masters
2023-10-13T17:21:44 debug local 192.168.1.41:22 copy files src /root/.sealos/default/pki to dst /etc/kubernetes/pki
2023-10-13T17:21:44 info start to copy etc pki files to masters
2023-10-13T17:21:44 debug local 192.168.1.41:22 copy files src /root/.sealos/default/pki to dst /etc/kubernetes/pki
2023-10-13T17:21:44 info start to create kubeconfig...
2023-10-13T17:21:44 debug start to exec remote 192.168.1.41:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl hostname
2023-10-13T17:21:44 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:21:44 debug [kubeconfig] Writing "admin.conf" kubeconfig file

2023-10-13T17:21:45 debug [kubeconfig] Writing "controller-manager.conf" kubeconfig file

2023-10-13T17:21:45 debug [kubeconfig] Writing "scheduler.conf" kubeconfig file

2023-10-13T17:21:45 debug [kubeconfig] Writing "kubelet.conf" kubeconfig file

2023-10-13T17:21:45 info start to copy kubeconfig files to masters
2023-10-13T17:21:45 debug local 192.168.1.41:22 copy files src /root/.sealos/default/etc/admin.conf to dst /etc/kubernetes/admin.conf
2023-10-13T17:21:45 debug local 192.168.1.41:22 copy files src /root/.sealos/default/etc/controller-manager.conf to dst /etc/kubernetes/controller-manager.conf
2023-10-13T17:21:45 debug local 192.168.1.41:22 copy files src /root/.sealos/default/etc/scheduler.conf to dst /etc/kubernetes/scheduler.conf
2023-10-13T17:21:45 debug local 192.168.1.41:22 copy files src /root/.sealos/default/etc/kubelet.conf to dst /etc/kubernetes/kubelet.conf
2023-10-13T17:21:45 info start to copy static files to masters
2023-10-13T17:21:45 debug start to run command `mkdir -p /etc/kubernetes && cp -f /var/lib/sealos/data/default/rootfs/statics/audit-policy.yml /etc/kubernetes/audit-policy.yml` via exec
2023-10-13T17:21:45 info start to init master0...
2023-10-13T17:21:45 debug start to run command `/var/lib/sealos/data/default/rootfs/opt/sealctl  hosts add --ip 192.168.1.41  --domain apiserver.cluster.local` via exec
2023-10-13T17:21:45 info domain apiserver.cluster.local:192.168.1.41 append success
2023-10-13T17:21:45 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:21:45 debug get vip is 10.103.97.2
2023-10-13T17:21:45 debug start to run command `kubeadm init --config=/root/.sealos/default/etc/kubeadm-init.yaml --skip-certificate-key-print --skip-token-print -v 6 --ignore-preflight-errors=SystemVerification` via exec
I1013 17:21:45.997108    1977 initconfiguration.go:255] loading configuration from "/root/.sealos/default/etc/kubeadm-init.yaml"
W1013 17:21:46.002647    1977 initconfiguration.go:306] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeproxy.config.k8s.io", Version:"v1alpha1", Kind:"KubeProxyConfiguration"}: strict decoding error: unknown field "udpIdleTimeout"
W1013 17:21:46.003200    1977 configset.go:177] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeproxy.config.k8s.io", Version:"v1alpha1", Kind:"KubeProxyConfiguration"}: strict decoding error: unknown field "udpIdleTimeout"
W1013 17:21:46.004933    1977 initconfiguration.go:120] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/run/containerd/containerd.sock". Please update your configuration!
W1013 17:21:46.004986    1977 utils.go:69] The recommended value for "healthzBindAddress" in "KubeletConfiguration" is: 127.0.0.1; the provided value is: 0.0.0.0
I1013 17:21:46.009210    1977 certs.go:519] validating certificate period for CA certificate
I1013 17:21:46.009356    1977 certs.go:519] validating certificate period for front-proxy CA certificate
[init] Using Kubernetes version: v1.27.6
[preflight] Running pre-flight checks
I1013 17:21:46.009548    1977 checks.go:563] validating Kubernetes and kubeadm version
I1013 17:21:46.009595    1977 checks.go:168] validating if the firewall is enabled and active
I1013 17:21:46.019772    1977 checks.go:203] validating availability of port 6443
I1013 17:21:46.019967    1977 checks.go:203] validating availability of port 10259
I1013 17:21:46.020034    1977 checks.go:203] validating availability of port 10257
I1013 17:21:46.020102    1977 checks.go:280] validating the existence of file /etc/kubernetes/manifests/kube-apiserver.yaml
I1013 17:21:46.020143    1977 checks.go:280] validating the existence of file /etc/kubernetes/manifests/kube-controller-manager.yaml
I1013 17:21:46.020176    1977 checks.go:280] validating the existence of file /etc/kubernetes/manifests/kube-scheduler.yaml
I1013 17:21:46.020189    1977 checks.go:280] validating the existence of file /etc/kubernetes/manifests/etcd.yaml
I1013 17:21:46.020201    1977 checks.go:430] validating if the connectivity type is via proxy or direct
I1013 17:21:46.020233    1977 checks.go:469] validating http connectivity to first IP address in the CIDR
I1013 17:21:46.020312    1977 checks.go:469] validating http connectivity to first IP address in the CIDR
I1013 17:21:46.020360    1977 checks.go:104] validating the container runtime
I1013 17:21:46.048194    1977 checks.go:639] validating whether swap is enabled or not
I1013 17:21:46.048292    1977 checks.go:370] validating the presence of executable crictl
I1013 17:21:46.048328    1977 checks.go:370] validating the presence of executable conntrack
I1013 17:21:46.048368    1977 checks.go:370] validating the presence of executable ip
I1013 17:21:46.048396    1977 checks.go:370] validating the presence of executable iptables
I1013 17:21:46.048425    1977 checks.go:370] validating the presence of executable mount
I1013 17:21:46.048460    1977 checks.go:370] validating the presence of executable nsenter
I1013 17:21:46.048541    1977 checks.go:370] validating the presence of executable ebtables
I1013 17:21:46.048572    1977 checks.go:370] validating the presence of executable ethtool
I1013 17:21:46.048627    1977 checks.go:370] validating the presence of executable socat
	[WARNING FileExisting-socat]: socat not found in system path
I1013 17:21:46.048709    1977 checks.go:370] validating the presence of executable tc
I1013 17:21:46.048755    1977 checks.go:370] validating the presence of executable touch
I1013 17:21:46.048788    1977 checks.go:516] running all checks
I1013 17:21:46.059477    1977 checks.go:401] checking whether the given node name is valid and reachable using net.LookupHost
I1013 17:21:46.059748    1977 checks.go:605] validating kubelet version
I1013 17:21:46.123390    1977 checks.go:130] validating if the "kubelet" service is enabled and active
I1013 17:21:46.132643    1977 checks.go:203] validating availability of port 10250
I1013 17:21:46.132733    1977 checks.go:329] validating the contents of file /proc/sys/net/bridge/bridge-nf-call-iptables
I1013 17:21:46.132811    1977 checks.go:329] validating the contents of file /proc/sys/net/ipv4/ip_forward
I1013 17:21:46.132881    1977 checks.go:203] validating availability of port 2379
I1013 17:21:46.132945    1977 checks.go:203] validating availability of port 2380
I1013 17:21:46.133008    1977 checks.go:243] validating the existence and emptiness of directory /var/lib/etcd
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I1013 17:21:46.133152    1977 checks.go:828] using image pull policy: IfNotPresent
I1013 17:21:46.247344    1977 checks.go:854] pulling: registry.k8s.io/kube-apiserver:v1.27.6
I1013 17:21:48.929039    1977 checks.go:854] pulling: registry.k8s.io/kube-controller-manager:v1.27.6
I1013 17:21:50.904674    1977 checks.go:854] pulling: registry.k8s.io/kube-scheduler:v1.27.6
I1013 17:21:52.250698    1977 checks.go:854] pulling: registry.k8s.io/kube-proxy:v1.27.6
W1013 17:21:54.076415    1977 checks.go:835] detected that the sandbox image "sealos.hub:5000/pause:3.9" of the container runtime is inconsistent with that used by kubeadm. It is recommended that using "registry.k8s.io/pause:3.9" as the CRI sandbox image.
I1013 17:21:54.218705    1977 checks.go:846] image exists: registry.k8s.io/pause:3.9
I1013 17:21:54.329121    1977 checks.go:854] pulling: registry.k8s.io/etcd:3.5.7-0
I1013 17:21:59.222597    1977 checks.go:854] pulling: registry.k8s.io/coredns/coredns:v1.10.1
[certs] Using certificateDir folder "/etc/kubernetes/pki"
I1013 17:22:00.424183    1977 certs.go:519] validating certificate period for ca certificate
[certs] Using existing ca certificate authority
I1013 17:22:00.424855    1977 certs.go:519] validating certificate period for apiserver certificate
[certs] Using existing apiserver certificate and key on disk
I1013 17:22:00.425468    1977 certs.go:519] validating certificate period for apiserver-kubelet-client certificate
[certs] Using existing apiserver-kubelet-client certificate and key on disk
I1013 17:22:00.426055    1977 certs.go:519] validating certificate period for front-proxy-ca certificate
[certs] Using existing front-proxy-ca certificate authority
I1013 17:22:00.426638    1977 certs.go:519] validating certificate period for front-proxy-client certificate
[certs] Using existing front-proxy-client certificate and key on disk
I1013 17:22:00.427209    1977 certs.go:519] validating certificate period for etcd/ca certificate
[certs] Using existing etcd/ca certificate authority
I1013 17:22:00.427832    1977 certs.go:519] validating certificate period for etcd/server certificate
[certs] Using existing etcd/server certificate and key on disk
I1013 17:22:00.428411    1977 certs.go:519] validating certificate period for etcd/peer certificate
[certs] Using existing etcd/peer certificate and key on disk
I1013 17:22:00.428947    1977 certs.go:519] validating certificate period for etcd/healthcheck-client certificate
[certs] Using existing etcd/healthcheck-client certificate and key on disk
I1013 17:22:00.429531    1977 certs.go:519] validating certificate period for apiserver-etcd-client certificate
[certs] Using existing apiserver-etcd-client certificate and key on disk
I1013 17:22:00.430061    1977 certs.go:78] creating new public/private key files for signing service account users
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1013 17:22:00.430595    1977 kubeconfig.go:103] creating kubeconfig file for admin.conf
I1013 17:22:00.520018    1977 loader.go:373] Config loaded from file:  /etc/kubernetes/admin.conf
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
I1013 17:22:00.520076    1977 kubeconfig.go:103] creating kubeconfig file for kubelet.conf
I1013 17:22:00.745409    1977 loader.go:373] Config loaded from file:  /etc/kubernetes/kubelet.conf
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
I1013 17:22:00.745501    1977 kubeconfig.go:103] creating kubeconfig file for controller-manager.conf
I1013 17:22:00.916114    1977 loader.go:373] Config loaded from file:  /etc/kubernetes/controller-manager.conf
W1013 17:22:00.916161    1977 kubeconfig.go:264] a kubeconfig file "/etc/kubernetes/controller-manager.conf" exists already but has an unexpected API Server URL: expected: https://192.168.1.41:6443, got: https://apiserver.cluster.local:6443
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
I1013 17:22:00.916192    1977 kubeconfig.go:103] creating kubeconfig file for scheduler.conf
I1013 17:22:01.073906    1977 loader.go:373] Config loaded from file:  /etc/kubernetes/scheduler.conf
W1013 17:22:01.073948    1977 kubeconfig.go:264] a kubeconfig file "/etc/kubernetes/scheduler.conf" exists already but has an unexpected API Server URL: expected: https://192.168.1.41:6443, got: https://apiserver.cluster.local:6443
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
I1013 17:22:01.073985    1977 kubelet.go:67] Stopping the kubelet
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
I1013 17:22:01.200938    1977 manifests.go:99] [control-plane] getting StaticPodSpecs
I1013 17:22:01.201293    1977 manifests.go:125] [control-plane] adding volume "audit" for component "kube-apiserver"
I1013 17:22:01.201312    1977 manifests.go:125] [control-plane] adding volume "audit-log" for component "kube-apiserver"
I1013 17:22:01.201324    1977 manifests.go:125] [control-plane] adding volume "ca-certs" for component "kube-apiserver"
I1013 17:22:01.201334    1977 manifests.go:125] [control-plane] adding volume "etc-pki" for component "kube-apiserver"
I1013 17:22:01.201346    1977 manifests.go:125] [control-plane] adding volume "k8s-certs" for component "kube-apiserver"
I1013 17:22:01.201356    1977 manifests.go:125] [control-plane] adding volume "localtime" for component "kube-apiserver"
I1013 17:22:01.205107    1977 manifests.go:154] [control-plane] wrote static Pod manifest for component "kube-apiserver" to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
I1013 17:22:01.205149    1977 manifests.go:99] [control-plane] getting StaticPodSpecs
I1013 17:22:01.206120    1977 manifests.go:125] [control-plane] adding volume "ca-certs" for component "kube-controller-manager"
I1013 17:22:01.206141    1977 manifests.go:125] [control-plane] adding volume "etc-pki" for component "kube-controller-manager"
I1013 17:22:01.206151    1977 manifests.go:125] [control-plane] adding volume "flexvolume-dir" for component "kube-controller-manager"
I1013 17:22:01.206161    1977 manifests.go:125] [control-plane] adding volume "k8s-certs" for component "kube-controller-manager"
I1013 17:22:01.206171    1977 manifests.go:125] [control-plane] adding volume "kubeconfig" for component "kube-controller-manager"
I1013 17:22:01.206181    1977 manifests.go:125] [control-plane] adding volume "localtime" for component "kube-controller-manager"
I1013 17:22:01.206956    1977 manifests.go:154] [control-plane] wrote static Pod manifest for component "kube-controller-manager" to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[control-plane] Creating static Pod manifest for "kube-scheduler"
I1013 17:22:01.206979    1977 manifests.go:99] [control-plane] getting StaticPodSpecs
I1013 17:22:01.207255    1977 manifests.go:125] [control-plane] adding volume "kubeconfig" for component "kube-scheduler"
I1013 17:22:01.207271    1977 manifests.go:125] [control-plane] adding volume "localtime" for component "kube-scheduler"
I1013 17:22:01.207757    1977 manifests.go:154] [control-plane] wrote static Pod manifest for component "kube-scheduler" to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1013 17:22:01.208568    1977 local.go:65] [etcd] wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml"
I1013 17:22:01.208585    1977 waitcontrolplane.go:83] [wait-control-plane] Waiting for the API server to be healthy
I1013 17:22:01.208999    1977 loader.go:373] Config loaded from file:  /etc/kubernetes/admin.conf
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1013 17:22:01.210509    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s  in 0 milliseconds
I1013 17:22:01.711816    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s  in 0 milliseconds
I1013 17:22:02.211939    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s  in 0 milliseconds
I1013 17:22:02.712126    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s  in 0 milliseconds
I1013 17:22:03.211976    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s  in 0 milliseconds
I1013 17:22:05.804729    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s 500 Internal Server Error in 2093 milliseconds
I1013 17:22:06.212873    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s 500 Internal Server Error in 1 milliseconds
I1013 17:22:06.712275    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s 500 Internal Server Error in 1 milliseconds
I1013 17:22:07.212989    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s 500 Internal Server Error in 2 milliseconds
I1013 17:22:07.715485    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/healthz?timeout=10s 200 OK in 1 milliseconds
[apiclient] All control plane components are healthy after 6.505532 seconds
I1013 17:22:07.715580    1977 uploadconfig.go:112] [upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1013 17:22:07.721197    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 4 milliseconds
I1013 17:22:07.726186    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles?timeout=10s 201 Created in 4 milliseconds
I1013 17:22:07.733694    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s 201 Created in 5 milliseconds
I1013 17:22:07.733941    1977 uploadconfig.go:126] [upload-config] Uploading the kubelet component config to a ConfigMap
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1013 17:22:07.742224    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 7 milliseconds
I1013 17:22:07.749284    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles?timeout=10s 201 Created in 6 milliseconds
I1013 17:22:07.754215    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s 201 Created in 4 milliseconds
I1013 17:22:07.754406    1977 uploadconfig.go:131] [upload-config] Preserving the CRISocket information for the control-plane node
I1013 17:22:07.754424    1977 patchnode.go:31] [patchnode] Uploading the CRI Socket information "unix:///run/containerd/containerd.sock" to the Node API object "k8s-master1" as an annotation
I1013 17:22:08.257674    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-master1?timeout=10s 200 OK in 2 milliseconds
I1013 17:22:08.265524    1977 round_trippers.go:553] PATCH https://apiserver.cluster.local:6443/api/v1/nodes/k8s-master1?timeout=10s 200 OK in 5 milliseconds
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master1 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
I1013 17:22:08.769230    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-master1?timeout=10s 200 OK in 2 milliseconds
I1013 17:22:08.776252    1977 round_trippers.go:553] PATCH https://apiserver.cluster.local:6443/api/v1/nodes/k8s-master1?timeout=10s 200 OK in 5 milliseconds
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1013 17:22:08.778993    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/secrets/bootstrap-token-5w92af?timeout=10s 404 Not Found in 2 milliseconds
I1013 17:22:08.783684    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/secrets?timeout=10s 201 Created in 4 milliseconds
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1013 17:22:08.786779    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterroles?timeout=10s 201 Created in 2 milliseconds
I1013 17:22:08.790016    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 2 milliseconds
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1013 17:22:08.793691    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 3 milliseconds
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1013 17:22:08.796556    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 2 milliseconds
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1013 17:22:08.799523    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 2 milliseconds
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1013 17:22:08.799657    1977 clusterinfo.go:47] [bootstrap-token] loading admin kubeconfig
I1013 17:22:08.800137    1977 loader.go:373] Config loaded from file:  /etc/kubernetes/admin.conf
I1013 17:22:08.800155    1977 clusterinfo.go:58] [bootstrap-token] copying the cluster from admin.conf to the bootstrap kubeconfig
I1013 17:22:08.800626    1977 clusterinfo.go:70] [bootstrap-token] creating/updating ConfigMap in kube-public namespace
I1013 17:22:08.803595    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-public/configmaps?timeout=10s 201 Created in 2 milliseconds
I1013 17:22:08.803771    1977 clusterinfo.go:84] creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace
I1013 17:22:08.807677    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:08.810897    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:08.811062    1977 kubeletfinalize.go:90] [kubelet-finalize] Assuming that kubelet client certificate rotation is enabled: found "/var/lib/kubelet/pki/kubelet-client-current.pem"
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1013 17:22:08.811619    1977 loader.go:373] Config loaded from file:  /etc/kubernetes/kubelet.conf
I1013 17:22:08.812170    1977 kubeletfinalize.go:134] [kubelet-finalize] Restarting the kubelet to enable client certificate rotation
I1013 17:22:09.023934    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/apis/apps/v1/namespaces/kube-system/deployments?labelSelector=k8s-app%3Dkube-dns 200 OK in 2 milliseconds
I1013 17:22:09.029246    1977 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps/coredns?timeout=10s 404 Not Found in 3 milliseconds
I1013 17:22:09.033231    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:09.036216    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterroles?timeout=10s 201 Created in 2 milliseconds
I1013 17:22:09.039259    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 2 milliseconds
I1013 17:22:09.042499    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/serviceaccounts?timeout=10s 201 Created in 2 milliseconds
I1013 17:22:09.050598    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/apps/v1/namespaces/kube-system/deployments?timeout=10s 201 Created in 7 milliseconds
I1013 17:22:09.058045    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/services?timeout=10s 201 Created in 6 milliseconds
[addons] Applied essential addon: CoreDNS
I1013 17:22:09.062452    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:09.071950    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/apps/v1/namespaces/kube-system/daemonsets?timeout=10s 201 Created in 8 milliseconds
I1013 17:22:09.075775    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/serviceaccounts?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:09.078976    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 3 milliseconds
I1013 17:22:09.085983    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles?timeout=10s 201 Created in 6 milliseconds
I1013 17:22:09.186601    1977 request.go:628] Waited for 100.309979ms due to client-side throttling, not priority and fairness, request: POST:https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s
I1013 17:22:09.191432    1977 round_trippers.go:553] POST https://apiserver.cluster.local:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s 201 Created in 4 milliseconds
[addons] Applied essential addon: kube-proxy
I1013 17:22:09.192110    1977 loader.go:373] Config loaded from file:  /etc/kubernetes/admin.conf
I1013 17:22:09.192679    1977 loader.go:373] Config loaded from file:  /etc/kubernetes/admin.conf

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join apiserver.cluster.local:6443 --token <value withheld> \
	--discovery-token-ca-cert-hash sha256:8a17edfd9a3ac44f2786991601c9fd13e8ff7f915b2a8f3bb45ac6aadca3ba06 \
	--control-plane --certificate-key <value withheld>

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join apiserver.cluster.local:6443 --token <value withheld> \
	--discovery-token-ca-cert-hash sha256:8a17edfd9a3ac44f2786991601c9fd13e8ff7f915b2a8f3bb45ac6aadca3ba06 
2023-10-13T17:22:09 debug start to run command `rm -rf $HOME/.kube/config && mkdir -p  $HOME/.kube && cp /etc/kubernetes/admin.conf $HOME/.kube/config` via exec
2023-10-13T17:22:09 info Executing pipeline Join in CreateProcessor.
2023-10-13T17:22:09 info [192.168.1.46:22] will be added as worker
2023-10-13T17:22:09 info start to get kubernetes token...
2023-10-13T17:22:09 debug start to exec remote 192.168.1.41:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl token /root/.sealos/default/etc/kubeadm-init.yaml 386a68505a2ee5995726c30ba3c5e5ecd03e401d97bfe66c039510b7826bc21c
2023-10-13T17:22:09 debug host 192.168.1.41:22 is local, command via exec
2023-10-13T17:22:09 info fetch certSANs from kubeadm configmap
2023-10-13T17:22:09 debug current cluster config data: map[apiServer:map[certSANs:[127.0.0.1 apiserver.cluster.local 10.103.97.2 192.168.1.41] extraArgs:map[audit-log-format:json audit-log-maxage:7 audit-log-maxbackup:10 audit-log-maxsize:100 audit-log-path:/var/log/kubernetes/audit.log audit-policy-file:/etc/kubernetes/audit-policy.yml authorization-mode:Node,RBAC enable-aggregator-routing:true feature-gates:] extraVolumes:[map[hostPath:/etc/kubernetes mountPath:/etc/kubernetes name:audit pathType:DirectoryOrCreate] map[hostPath:/var/log/kubernetes mountPath:/var/log/kubernetes name:audit-log pathType:DirectoryOrCreate] map[hostPath:/etc/localtime mountPath:/etc/localtime name:localtime pathType:File readOnly:true]] timeoutForControlPlane:4m0s] apiVersion:kubeadm.k8s.io/v1beta3 certificatesDir:/etc/kubernetes/pki clusterName:kubernetes controlPlaneEndpoint:apiserver.cluster.local:6443 controllerManager:map[extraArgs:map[bind-address:0.0.0.0 cluster-signing-duration:876000h feature-gates:] extraVolumes:[map[hostPath:/etc/localtime mountPath:/etc/localtime name:localtime pathType:File readOnly:true]]] dns:map[] etcd:map[local:map[dataDir:/var/lib/etcd extraArgs:map[listen-metrics-urls:http://0.0.0.0:2381]]] imageRepository:registry.k8s.io kind:ClusterConfiguration kubernetesVersion:v1.27.6 networking:map[dnsDomain:cluster.local podSubnet:100.64.0.0/10 serviceSubnet:10.96.0.0/22] scheduler:map[extraArgs:map[bind-address:0.0.0.0 feature-gates:] extraVolumes:[map[hostPath:/etc/localtime mountPath:/etc/localtime name:localtime pathType:File readOnly:true]]]]
2023-10-13T17:22:09 debug current cluster certSANs: [127.0.0.1 apiserver.cluster.local 10.103.97.2 192.168.1.41]
2023-10-13T17:22:09 info start to join 192.168.1.46:22 as worker
2023-10-13T17:22:09 info start to copy kubeadm join config to node: 192.168.1.46:22
2023-10-13T17:22:09 debug using default kubeadm config
2023-10-13T17:22:09 debug skip merging kubeadm configs from cause file /var/lib/sealos/data/default/rootfs/etc/kubeadm.yml not exists
2023-10-13T17:22:09 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:09 debug get vip is 10.103.97.2
2023-10-13T17:22:09 debug start to exec remote 192.168.1.46:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl cri socket
2023-10-13T17:22:09 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl cri socket` on 192.168.1.46:22
2023-10-13T17:22:09 debug get nodes [192.168.1.46:22] cri socket is [/run/containerd/containerd.sock]
2023-10-13T17:22:09 debug node: 192.168.1.46:22 , criSocket: /run/containerd/containerd.sock
2023-10-13T17:22:09 debug start to exec remote 192.168.1.46:22 shell: /var/lib/sealos/data/default/rootfs/opt/sealctl cri cgroup-driver --short
2023-10-13T17:22:09 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl cri cgroup-driver --short` on 192.168.1.46:22
2023-10-13T17:22:10 debug get nodes [192.168.1.46:22] cgroup driver is [systemd]
2023-10-13T17:22:10 debug node: 192.168.1.46:22 , cGroupDriver: systemd
2023-10-13T17:22:10 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:10 debug get vip is 10.103.97.2
2023-10-13T17:22:10 debug remote copy files src /root/.sealos/default/tmp/kubeadm-join-node.yaml to dst /root/.sealos/default/etc/kubeadm-join-node.yaml
2023-10-13T17:22:10 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:10 debug get vip is 10.103.97.2
2023-10-13T17:22:10 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl  hosts add --ip 10.103.97.2  --domain apiserver.cluster.local` on 192.168.1.46:22
192.168.1.46:22	2023-10-13T17:22:10 info domain apiserver.cluster.local:10.103.97.2 append success
2023-10-13T17:22:10 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl  hosts add --ip 192.168.1.46  --domain lvscare.node.ip` on 192.168.1.46:22
192.168.1.46:22	2023-10-13T17:22:10 info domain lvscare.node.ip:192.168.1.46 append success
2023-10-13T17:22:10 info run ipvs once module: 192.168.1.46:22
2023-10-13T17:22:10 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:10 debug get vip is 10.103.97.2
2023-10-13T17:22:10 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl  ipvs --vs 10.103.97.2:6443  --rs  192.168.1.41:6443  --health-path /healthz --health-schem https --run-once` on 192.168.1.46:22
192.168.1.46:22	2023-10-13T17:22:10 info Trying to add route
192.168.1.46:22	2023-10-13T17:22:10 info success to set route.(host:10.103.97.2, gateway:192.168.1.46)
2023-10-13T17:22:10 info start join node: 192.168.1.46:22
2023-10-13T17:22:10 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:10 debug get vip is 10.103.97.2
2023-10-13T17:22:10 debug start to exec `kubeadm join --config=/root/.sealos/default/etc/kubeadm-join-node.yaml -v 6` on 192.168.1.46:22
192.168.1.46:22	I1013 17:22:10.561231    1954 join.go:412] [preflight] found NodeName empty; using OS hostname as NodeName
192.168.1.46:22	I1013 17:22:10.561310    1954 joinconfiguration.go:76] loading configuration from "/root/.sealos/default/etc/kubeadm-join-node.yaml"
192.168.1.46:22	W1013 17:22:10.562477    1954 initconfiguration.go:120] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/run/containerd/containerd.sock". Please update your configuration!
192.168.1.46:22	[preflight] Running pre-flight checks
192.168.1.46:22	I1013 17:22:10.562620    1954 preflight.go:93] [preflight] Running general checks
192.168.1.46:22	I1013 17:22:10.562687    1954 checks.go:280] validating the existence of file /etc/kubernetes/kubelet.conf
192.168.1.46:22	I1013 17:22:10.562705    1954 checks.go:280] validating the existence of file /etc/kubernetes/bootstrap-kubelet.conf
192.168.1.46:22	I1013 17:22:10.562725    1954 checks.go:104] validating the container runtime
192.168.1.46:22	I1013 17:22:10.592094    1954 checks.go:639] validating whether swap is enabled or not
192.168.1.46:22	I1013 17:22:10.592189    1954 checks.go:370] validating the presence of executable crictl
192.168.1.46:22	I1013 17:22:10.592246    1954 checks.go:370] validating the presence of executable conntrack
192.168.1.46:22	I1013 17:22:10.592312    1954 checks.go:370] validating the presence of executable ip
192.168.1.46:22	I1013 17:22:10.592364    1954 checks.go:370] validating the presence of executable iptables
192.168.1.46:22	I1013 17:22:10.592418    1954 checks.go:370] validating the presence of executable mount
192.168.1.46:22	I1013 17:22:10.592476    1954 checks.go:370] validating the presence of executable nsenter
192.168.1.46:22	I1013 17:22:10.592564    1954 checks.go:370] validating the presence of executable ebtables
192.168.1.46:22	I1013 17:22:10.592617    1954 checks.go:370] validating the presence of executable ethtool
192.168.1.46:22	I1013 17:22:10.592680    1954 checks.go:370] validating the presence of executable socat
192.168.1.46:22		[WARNING FileExisting-socat]: socat not found in system path
192.168.1.46:22	I1013 17:22:10.592801    1954 checks.go:370] validating the presence of executable tc
192.168.1.46:22	I1013 17:22:10.592854    1954 checks.go:370] validating the presence of executable touch
192.168.1.46:22	I1013 17:22:10.592921    1954 checks.go:516] running all checks
192.168.1.46:22	I1013 17:22:10.604349    1954 checks.go:401] checking whether the given node name is valid and reachable using net.LookupHost
192.168.1.46:22	I1013 17:22:10.604655    1954 checks.go:605] validating kubelet version
192.168.1.46:22	I1013 17:22:10.671270    1954 checks.go:130] validating if the "kubelet" service is enabled and active
192.168.1.46:22	I1013 17:22:10.681081    1954 checks.go:203] validating availability of port 10250
192.168.1.46:22	I1013 17:22:10.681276    1954 checks.go:280] validating the existence of file /etc/kubernetes/pki/ca.crt
192.168.1.46:22	I1013 17:22:10.681357    1954 checks.go:430] validating if the connectivity type is via proxy or direct
192.168.1.46:22	I1013 17:22:10.681404    1954 checks.go:329] validating the contents of file /proc/sys/net/bridge/bridge-nf-call-iptables
192.168.1.46:22	I1013 17:22:10.681473    1954 checks.go:329] validating the contents of file /proc/sys/net/ipv4/ip_forward
192.168.1.46:22	I1013 17:22:10.681534    1954 join.go:529] [preflight] Discovering cluster-info
192.168.1.46:22	I1013 17:22:10.681578    1954 token.go:80] [discovery] Created cluster-info discovery client, requesting info from "10.103.97.2:6443"
192.168.1.46:22	I1013 17:22:10.690072    1954 round_trippers.go:553] GET https://10.103.97.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s 200 OK in 7 milliseconds
192.168.1.46:22	I1013 17:22:10.690481    1954 token.go:223] [discovery] The cluster-info ConfigMap does not yet contain a JWS signature for token ID "jlagbc", will try again
192.168.1.46:22	I1013 17:22:15.775241    1954 round_trippers.go:553] GET https://10.103.97.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s 200 OK in 3 milliseconds
192.168.1.46:22	I1013 17:22:15.775595    1954 token.go:223] [discovery] The cluster-info ConfigMap does not yet contain a JWS signature for token ID "jlagbc", will try again
192.168.1.46:22	I1013 17:22:21.415228    1954 round_trippers.go:553] GET https://10.103.97.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s 200 OK in 3 milliseconds
192.168.1.46:22	I1013 17:22:21.416013    1954 token.go:118] [discovery] Requesting info from "10.103.97.2:6443" again to validate TLS against the pinned public key
192.168.1.46:22	I1013 17:22:21.423733    1954 round_trippers.go:553] GET https://10.103.97.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s 200 OK in 7 milliseconds
192.168.1.46:22	I1013 17:22:21.424129    1954 token.go:135] [discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "10.103.97.2:6443"
192.168.1.46:22	I1013 17:22:21.424172    1954 discovery.go:52] [discovery] Using provided TLSBootstrapToken as authentication credentials for the join process
192.168.1.46:22	I1013 17:22:21.424206    1954 join.go:543] [preflight] Fetching init configuration
192.168.1.46:22	I1013 17:22:21.424231    1954 join.go:589] [preflight] Retrieving KubeConfig objects
192.168.1.46:22	[preflight] Reading configuration from the cluster...
192.168.1.46:22	[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
192.168.1.46:22	I1013 17:22:21.431787    1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config?timeout=10s 200 OK in 7 milliseconds
192.168.1.46:22	I1013 17:22:21.435262    1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps/kube-proxy?timeout=10s 200 OK in 1 milliseconds
192.168.1.46:22	I1013 17:22:21.437782    1954 kubelet.go:74] attempting to download the KubeletConfiguration from ConfigMap "kubelet-config"
192.168.1.46:22	I1013 17:22:21.440548    1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/configmaps/kubelet-config?timeout=10s 200 OK in 2 milliseconds
192.168.1.46:22	I1013 17:22:21.443472    1954 interface.go:432] Looking for default routes with IPv4 addresses
192.168.1.46:22	I1013 17:22:21.443505    1954 interface.go:437] Default route transits interface "ens33"
192.168.1.46:22	I1013 17:22:21.443782    1954 interface.go:209] Interface ens33 is up
192.168.1.46:22	I1013 17:22:21.443952    1954 interface.go:257] Interface "ens33" has 2 addresses :[192.168.1.46/24 fe80::8773:c4f5:cbc8:7f56/64].
192.168.1.46:22	I1013 17:22:21.443976    1954 interface.go:224] Checking addr  192.168.1.46/24.
192.168.1.46:22	I1013 17:22:21.443987    1954 interface.go:231] IP found 192.168.1.46
192.168.1.46:22	I1013 17:22:21.444018    1954 interface.go:263] Found valid IPv4 address 192.168.1.46 for interface "ens33".
192.168.1.46:22	I1013 17:22:21.444047    1954 interface.go:443] Found active IP 192.168.1.46 
192.168.1.46:22	W1013 17:22:21.444069    1954 utils.go:69] The recommended value for "healthzBindAddress" in "KubeletConfiguration" is: 127.0.0.1; the provided value is: 0.0.0.0
192.168.1.46:22	I1013 17:22:21.448455    1954 preflight.go:104] [preflight] Running configuration dependant checks
192.168.1.46:22	I1013 17:22:21.448492    1954 controlplaneprepare.go:225] [download-certs] Skipping certs download
192.168.1.46:22	I1013 17:22:21.448510    1954 kubelet.go:121] [kubelet-start] writing bootstrap kubelet config file at /etc/kubernetes/bootstrap-kubelet.conf
192.168.1.46:22	I1013 17:22:21.449164    1954 kubelet.go:136] [kubelet-start] writing CA certificate at /etc/kubernetes/pki/ca.crt
192.168.1.46:22	I1013 17:22:21.449596    1954 loader.go:373] Config loaded from file:  /etc/kubernetes/bootstrap-kubelet.conf
192.168.1.46:22	I1013 17:22:21.449887    1954 kubelet.go:157] [kubelet-start] Checking for an existing Node in the cluster with name "k8s-node1" and status "Ready"
192.168.1.46:22	I1013 17:22:21.452477    1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-node1?timeout=10s 404 Not Found in 2 milliseconds
192.168.1.46:22	I1013 17:22:21.452914    1954 kubelet.go:172] [kubelet-start] Stopping the kubelet
192.168.1.46:22	[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
192.168.1.46:22	[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
192.168.1.46:22	[kubelet-start] Starting the kubelet
192.168.1.46:22	[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
192.168.1.46:22	I1013 17:22:22.580102    1954 loader.go:373] Config loaded from file:  /etc/kubernetes/kubelet.conf
192.168.1.46:22	I1013 17:22:22.581326    1954 loader.go:373] Config loaded from file:  /etc/kubernetes/kubelet.conf
192.168.1.46:22	I1013 17:22:22.581486    1954 cert_rotation.go:137] Starting client certificate rotation controller
192.168.1.46:22	I1013 17:22:22.581771    1954 kubelet.go:220] [kubelet-start] preserving the crisocket information for the node
192.168.1.46:22	I1013 17:22:22.581808    1954 patchnode.go:31] [patchnode] Uploading the CRI Socket information "unix:///run/containerd/containerd.sock" to the Node API object "k8s-node1" as an annotation
192.168.1.46:22	I1013 17:22:23.091731    1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-node1?timeout=10s 404 Not Found in 8 milliseconds
192.168.1.46:22	I1013 17:22:23.585294    1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-node1?timeout=10s 404 Not Found in 2 milliseconds
192.168.1.46:22	I1013 17:22:24.084994    1954 round_trippers.go:553] GET https://apiserver.cluster.local:6443/api/v1/nodes/k8s-node1?timeout=10s 200 OK in 2 milliseconds
192.168.1.46:22	I1013 17:22:24.090869    1954 round_trippers.go:553] PATCH https://apiserver.cluster.local:6443/api/v1/nodes/k8s-node1?timeout=10s 200 OK in 4 milliseconds
192.168.1.46:22	
192.168.1.46:22	This node has joined the cluster:
192.168.1.46:22	* Certificate signing request was sent to apiserver and a response was received.
192.168.1.46:22	* The Kubelet was informed of the new secure connection details.
192.168.1.46:22	
192.168.1.46:22	Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
192.168.1.46:22	
2023-10-13T17:22:24 info succeeded in joining 192.168.1.46:22 as worker
2023-10-13T17:22:24 debug remote copy files src /root/.sealos/default/etc/admin.conf to dst .kube/config
2023-10-13T17:22:24 info start to sync lvscare static pod to node: 192.168.1.46:22 master: [192.168.1.41:6443]
2023-10-13T17:22:24 debug renderTextFromEnv: replaces: map[$(SEALOS_SYS_CRI_ENDPOINT):/var/run/containerd/containerd.sock $(SEALOS_SYS_IMAGE_ENDPOINT):/var/run/image-cri-shim.sock $(criData):/var/lib/containerd $(defaultVIP):10.103.97.2 $(disableApparmor):false $(registryConfig):/etc/registry $(registryData):/var/lib/registry $(registryDomain):sealos.hub $(registryPassword):passw0rd $(registryPort):5000 $(registryUsername):admin $(sandboxImage):pause:3.9 $SEALOS_SYS_CRI_ENDPOINT:/var/run/containerd/containerd.sock $SEALOS_SYS_IMAGE_ENDPOINT:/var/run/image-cri-shim.sock $criData:/var/lib/containerd $defaultVIP:10.103.97.2 $disableApparmor:false $registryConfig:/etc/registry $registryData:/var/lib/registry $registryDomain:sealos.hub $registryPassword:passw0rd $registryPort:5000 $registryUsername:admin $sandboxImage:pause:3.9 ${SEALOS_SYS_CRI_ENDPOINT}:/var/run/containerd/containerd.sock ${SEALOS_SYS_IMAGE_ENDPOINT}:/var/run/image-cri-shim.sock ${criData}:/var/lib/containerd ${defaultVIP}:10.103.97.2 ${disableApparmor}:false ${registryConfig}:/etc/registry ${registryData}:/var/lib/registry ${registryDomain}:sealos.hub ${registryPassword}:passw0rd ${registryPort}:5000 ${registryUsername}:admin ${sandboxImage}:pause:3.9] ; text: $defaultVIP
2023-10-13T17:22:24 debug get vip is 10.103.97.2
2023-10-13T17:22:24 debug start to exec `/var/lib/sealos/data/default/rootfs/opt/sealctl  static-pod lvscare --name kube-sealos-lvscare --vip 10.103.97.2:6443 --image ghcr.io/labring/lvscare:v4.3.5   --masters  192.168.1.41:6443` on 192.168.1.46:22
192.168.1.46:22	2023-10-13T17:22:24 info generator lvscare static pod is success
2023-10-13T17:22:24 info Executing pipeline RunGuest in CreateProcessor.
2023-10-13T17:22:24 debug start to exec `` on 192.168.1.46:22
2023-10-13T17:22:24 debug start to run command `` via exec
2023-10-13T17:22:24 debug start to run command `cd /var/lib/sealos/data/default/applications/default-mamxdvth/workdir && cp -f opt/helm /usr/bin/helm` via exec
2023-10-13T17:22:24 debug start to run command `cd /var/lib/sealos/data/default/applications/default-xq4qhf1k/workdir && helm upgrade -i calico charts/calico -f charts/calico.values.yaml -n tigera-operator --create-namespace` via exec
Release "calico" does not exist. Installing it now.
NAME: calico
LAST DEPLOYED: Fri Oct 13 17:22:27 2023
NAMESPACE: tigera-operator
STATUS: deployed
REVISION: 1
TEST SUITE: None
2023-10-13T17:22:29 info succeeded in creating a new cluster, enjoy it!
2023-10-13T17:22:29 debug save objects into local: /root/.sealos/default/Clusterfile, objects: [apiVersion: apps.sealos.io/v1beta1
kind: Cluster
metadata:
  creationTimestamp: "2023-10-13T09:22:29Z"
  name: default
spec:
  hosts:
  - ips:
    - 192.168.1.41:22
    roles:
    - master
    - amd64
  - ips:
    - 192.168.1.46:22
    roles:
    - node
    - amd64
  image:
  - labring/kubernetes:v1.27.6
  - labring/helm:v3.12.3
  - labring/calico:v3.26.1
  ssh:
    passwd: kgb007
status:
  commandCondition:
  - images:
    - labring/kubernetes:v1.27.6
    - labring/helm:v3.12.3
    - labring/calico:v3.26.1
    lastHeartbeatTime: "2023-10-13T09:22:29Z"
    message: Applied to cluster successfully
    reason: Apply Command
    status: "True"
    type: ApplyCommandSuccess
  conditions:
  - lastHeartbeatTime: "2023-10-13T09:22:29Z"
    message: Applied to cluster successfully
    reason: Ready
    status: "True"
    type: ApplyClusterSuccess
  mounts:
  - env:
      SEALOS_SYS_CRI_ENDPOINT: /var/run/containerd/containerd.sock
      SEALOS_SYS_IMAGE_ENDPOINT: /var/run/image-cri-shim.sock
      criData: /var/lib/containerd
      defaultVIP: 10.103.97.2
      disableApparmor: "false"
      registryConfig: /etc/registry
      registryData: /var/lib/registry
      registryDomain: sealos.hub
      registryPassword: passw0rd
      registryPort: "5000"
      registryUsername: admin
      sandboxImage: pause:3.9
    imageName: labring/kubernetes:v1.27.6
    labels:
      check: check.sh $registryData
      clean: clean.sh && bash clean-cri.sh $criData
      clean-registry: clean-registry.sh $registryData $registryConfig
      image: ghcr.io/labring/lvscare:v4.3.5
      init: init-cri.sh $registryDomain $registryPort && bash init.sh
      init-registry: init-registry.sh $registryData $registryConfig
      io.buildah.version: 1.30.0
      org.opencontainers.image.description: kubernetes-v1.27.6 container image
      org.opencontainers.image.licenses: MIT
      org.opencontainers.image.source: https://github.com/labring-actions/cache
      sealos.io.type: rootfs
      sealos.io.version: v1beta1
      version: v1.27.6
      vip: $defaultVIP
    mountPoint: /var/lib/containers/storage/overlay/7f3a1913735b085021c94ebe3bcd44c25a1538f0fc5a75df989a3fd27500c8d0/merged
    name: default-g23pfjqw
    type: rootfs
  - cmd:
    - cp -f opt/helm /usr/bin/helm
    imageName: labring/helm:v3.12.3
    labels:
      io.buildah.version: 1.30.0
    mountPoint: /var/lib/containers/storage/overlay/e2329b1df7f9a5888a4c01365cb8e7ddbb8f08d645dbb3faca11d9d568fbeb1b/merged
    name: default-mamxdvth
    type: application
  - cmd:
    - helm upgrade -i calico charts/calico -f charts/calico.values.yaml -n tigera-operator
      --create-namespace
    imageName: labring/calico:v3.26.1
    labels:
      io.buildah.version: 1.30.0
    mountPoint: /var/lib/containers/storage/overlay/b2fe92f5677ab889adaa7e9ae8741a6d076f9503cda50d43ea98263fe243980c/merged
    name: default-xq4qhf1k
    type: application
  phase: ClusterSuccess
]
2023-10-13T17:22:29 info 
      ___           ___           ___           ___       ___           ___
     /\  \         /\  \         /\  \         /\__\     /\  \         /\  \
    /::\  \       /::\  \       /::\  \       /:/  /    /::\  \       /::\  \
   /:/\ \  \     /:/\:\  \     /:/\:\  \     /:/  /    /:/\:\  \     /:/\ \  \
  _\:\~\ \  \   /::\~\:\  \   /::\~\:\  \   /:/  /    /:/  \:\  \   _\:\~\ \  \
 /\ \:\ \ \__\ /:/\:\ \:\__\ /:/\:\ \:\__\ /:/__/    /:/__/ \:\__\ /\ \:\ \ \__\
 \:\ \:\ \/__/ \:\~\:\ \/__/ \/__\:\/:/  / \:\  \    \:\  \ /:/  / \:\ \:\ \/__/
  \:\ \:\__\    \:\ \:\__\        \::/  /   \:\  \    \:\  /:/  /   \:\ \:\__\
   \:\/:/  /     \:\ \/__/        /:/  /     \:\  \    \:\/:/  /     \:\/:/  /
    \::/  /       \:\__\         /:/  /       \:\__\    \::/  /       \::/  /
     \/__/         \/__/         \/__/         \/__/     \/__/         \/__/

                  Website: https://www.sealos.io/
                  Address: github.com/labring/sealos
                  Version: 4.3.5-881c10cb

看到上面的 sealos 图标和 "succeeded in creating a new cluster" 提示,说明集群已经部署成功!可以在 master 节点上验证:

kubectl get node
NAME          STATUS   ROLES           AGE   VERSION
k8s-master1   Ready    control-plane   31m   v1.27.6
k8s-node1     Ready    <none>          30m   v1.27.6

目前集群中只有 1 台 master 和 1 台 node。下一篇《sealos4.3.5安装手册(二)追加节点》将介绍使用 sealos add(join)命令把其余的 master 和 node 节点加入集群。

下面这个图可以增加博主分享知识的动力,来助力一下吧

学习的动力

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/Guff_9hys/article/detail/955758
推荐阅读
相关标签
  

闽ICP备14008679号