| name | version |
| --- | --- |
| ubuntu | 20.04 |
| kubectl | 1.21.13 |
| helm | 3.9.3 |
| docker | 20.10.17 |
# Back up the default apt sources
$ sudo mv /etc/apt/sources.list /etc/apt/sources.list.bak
# Replace them with the Aliyun mirror
$ sudo vi /etc/apt/sources.list
$ sudo apt-get update -y && sudo apt-get upgrade -y
# Aliyun mirror (note: these entries use the bionic codename, i.e. Ubuntu 18.04; for Ubuntu 20.04 replace bionic with focal)
deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
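# As an alternative to editing the file in vi, the same mirror list can be written
# non-interactively (a sketch; substitute the codename that matches your release,
# e.g. focal for Ubuntu 20.04 instead of bionic)
$ sudo tee /etc/apt/sources.list > /dev/null <<'EOF'
deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
EOF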
# openssh-server is installed so that the host can be reached remotely via Xshell
$ sudo apt install -y curl git tree openssh-server
## If the following error appears while installing openssh-server, first install the matching version of openssh-client
openssh-server : Depends: openssh-client (= 1:7.6p1-4ubuntu0.7)
Depends: openssh-sftp-server but it is not going to be installed
$ sudo apt install -y openssh-client=1:7.6p1-4ubuntu0.7
$ sudo passwd root
New password: ******
Retype new password: ******
# Switch to the root user (so that subsequent commands no longer need the sudo prefix)
$ sudo -i
[sudo] password for coderwqs: ******
$ curl -fsSL https://get.docker.com -o get-docker.sh
$ sh ./get-docker.sh
$ systemctl start docker && systemctl enable docker
# Configure domestic Docker registry mirrors
$ cat <<EOF > /etc/docker/daemon.json
{
"registry-mirrors": [
"http://hub-mirror.c.163.com",
"https://docker.mirrors.ustc.edu.cn",
"https://registry.docker-cn.com"
]
}
EOF
# Restart the Docker service
$ systemctl daemon-reload && systemctl restart docker
# Check Docker
$ docker version
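# To confirm the mirror configuration was picked up, docker info lists the active mirrors
$ docker info | grep -A 3 "Registry Mirrors"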
# On Ubuntu, the firewall (ufw) is disabled by default
# Firewall status / enable / disable / version
$ ufw status | enable | disable | version
# Allow/deny all inbound connections by default
$ ufw default allow | deny
# Allow/deny inbound access to a specific port
$ ufw allow | deny [port]
# Allow a specific IP address to access all ports on this host
$ ufw allow from [ip]
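# Concrete examples (values are illustrative): allow SSH, deny port 8080, allow a trusted subnet
$ ufw allow 22/tcp
$ ufw deny 8080
$ ufw allow from 192.168.3.0/24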
# Disable the firewall and SELinux (firewalld/SELinux, i.e. on CentOS hosts)
$ systemctl stop firewalld && systemctl disable firewalld
$ sed -i 's/enforcing/disabled/g' /etc/selinux/config; setenforce 0
# Open the required ports (firewalld)
$ firewall-cmd --zone=public --add-port=81/tcp --permanent
$ firewall-cmd --zone=public --add-port=444/tcp --permanent
$ docker run -itd --privileged -p 70:80 -p 444:443 -v /data/docker/volumes/rancher:/var/lib/rancher --name rancher-server --restart=unless-stopped -e CATTLE_AGENT_IMAGE="registry.cn-hangzhou.aliyuncs.com/rancher/rancher-agent:v2.6.3" registry.cn-hangzhou.aliyuncs.com/rancher/rancher:v2.6.3
# View the Rancher logs
$ docker logs -f rancher-server
# Get the initial bootstrap password
$ docker logs rancher-server 2>&1 | grep "Bootstrap Password:"
# Access the service (startup takes a while)
https://[host]:444
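# If the admin password is lost later, the Rancher container image provides a reset helper
# (container name as used above)
$ docker exec -ti rancher-server reset-password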
$ mkdir ~/.kube
# Paste the copied kubeconfig content into the ~/.kube/config file
$ vi ~/.kube/config
# Add the repository signing key
$ curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -
$ apt-get install apt-transport-https --yes
# Add the helm and kubernetes apt repositories
$ echo "deb https://baltocdn.com/helm/stable/debian/ all main" | tee /etc/apt/sources.list.d/helm-stable-debian.list
$ echo "deb http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list
$ apt-get update
# List the available kubeadm / kubelet / kubectl versions
$ apt-cache madison kubeadm kubelet kubectl
# Install kubectl (keep the version consistent with the installed Kubernetes cluster)
$ apt-get install -y kubectl=1.21.13-00
# Install helm
$ apt-get install helm
# Test
$ helm version
$ kubectl get nodes
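# Optionally pin kubectl so routine apt upgrades do not move it away from the cluster version
$ apt-mark hold kubectl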
# Add the Kubernetes yum repository (CentOS)
cat >> /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
# List the installable versions
$ yum list kubectl --showduplicates
# Install kubectl
$ yum install -y kubectl.x86_64
# Download helm
# Huawei mirror: https://mirrors.huaweicloud.com/helm/
$ wget https://get.helm.sh/helm-v3.9.2-linux-amd64.tar.gz
# Extract and move helm to /usr/local/bin
$ tar -zxvf helm-v3.9.2-linux-amd64.tar.gz
$ mv ./linux-amd64/helm /usr/local/bin && chmod +x /usr/local/bin/helm
# Verify helm
$ helm version
# Add the helm repositories required by the project
$ helm repo add stable https://charts.helm.sh/stable
$ helm repo add bitnami https://charts.bitnami.com/bitnami
# Clone the project
$ git clone https://github.com/mainflux/devops.git
# Change into the chart directory
$ cd devops/charts/mainflux
# Update the chart dependencies
$ helm dependency update
# Create a namespace
$ kubectl create namespace mf
# Install the project
$ helm install mainflux . -n mf
##########################################################
# This project requires a number of PersistentVolumes (PVs)
# to be provisioned (see the PV sketch below)
# If image pulls are too slow, pull the images manually
# PV type: Rancher v2.6.6 currently does not support the local type
# If tcp-service fails to start, modify ingress.yaml so that the
# related services get their own namespaces
##########################################################
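# A minimal hostPath PV sketch for the PV requirement noted above
# (name, size and host path are illustrative; create as many as the chart's PVCs need)
$ cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mf-pv-01
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /data/k8s/mf-pv-01
EOF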
# If some services fail to start, check the pod logs first
$ kubectl logs [pod-name] -n [namespace-name]
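# If the logs are not conclusive, describe the pod and check the namespace events
$ kubectl describe pod [pod-name] -n [namespace-name]
$ kubectl get events -n [namespace-name] --sort-by=.metadata.creationTimestamp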
#!/bin/bash
KUBE_SVC='
kubelet
kube-scheduler
kube-proxy
kube-controller-manager
kube-apiserver
'
for kube_svc in ${KUBE_SVC};
do
# Stop the service
if [[ `systemctl is-active ${kube_svc}` == 'active' ]]; then
systemctl stop ${kube_svc}
fi
# Disable the service from starting at boot
if [[ `systemctl is-enabled ${kube_svc}` == 'enabled' ]]; then
systemctl disable ${kube_svc}
fi
done
# Stop all containers
docker stop $(docker ps -aq)
# Remove all containers
docker rm -f $(docker ps -qa)
# Remove all container volumes
docker volume rm $(docker volume ls -q)
# Unmount the kubelet/rancher mount points
for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher;
do
umount $mount;
done
# Back up directories (optional)
# mv /etc/kubernetes /etc/kubernetes-bak-$(date +"%Y%m%d%H%M")
# mv /var/lib/etcd /var/lib/etcd-bak-$(date +"%Y%m%d%H%M")
# mv /var/lib/rancher /var/lib/rancher-bak-$(date +"%Y%m%d%H%M")
# mv /opt/rke /opt/rke-bak-$(date +"%Y%m%d%H%M")
# Remove leftover paths
rm -rf /etc/ceph \
/etc/cni \
/opt/cni \
/run/secrets/kubernetes.io \
/run/calico \
/run/flannel \
/var/lib/calico \
/var/lib/cni \
/var/lib/kubelet \
/var/log/containers \
/var/log/kube-audit \
/var/log/pods \
/var/run/calico \
/usr/libexec/kubernetes \
/data/docker/volumes/rancher/*
# Clean up network interfaces
no_del_net_inter='
lo
docker0
eth
ens
bond
'
network_interface=`ls /sys/class/net`
for net_inter in $network_interface;
do
if ! echo "${no_del_net_inter}" | grep -qE ${net_inter:0:3}; then
ip link delete $net_inter
fi
done
# Kill leftover processes
port_list='
80
443
6443
2376
2379
2380
8472
9099
10250
10254
'
for port in $port_list;
do
pid=`netstat -atlnup | grep $port | awk '{print $7}' | awk -F '/' '{print $1}' | grep -v - | sort -rnk2 | uniq`
if [[ -n $pid ]]; then
kill -9 $pid
fi
done
kube_pid=`ps -ef | grep -v grep | grep kube | awk '{print $2}'`
if [[ -n $kube_pid ]]; then
kill -9 $kube_pid
fi
# Flush the iptables tables
## Note: if the node has custom iptables rules, run the following commands with caution
sudo iptables --flush
sudo iptables --flush --table nat
sudo iptables --flush --table filter
sudo iptables --table nat --delete-chain
sudo iptables --table filter --delete-chain
systemctl restart docker
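# A quick post-cleanup check (illustrative): no kube processes, no containers,
# and only the expected network interfaces should remain
ps -ef | grep -v grep | grep kube
docker ps -a
ip link show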
W: GPG error: https://dl.winehq.org/wine-builds/ubuntu xenial InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY 76F1A20FF987672F
$ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys <PUBKEY_ID>
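# With the key ID taken from the warning above, the command becomes:
$ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 76F1A20FF987672F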
[etcd] Failed to bring up Etcd Plane: etcd cluster is unhealthy: hosts [192.168.100.666] failed to report healthy. Check etcd container logs on each host for more information
$ docker stop $(docker ps -aq)
$ docker system prune -f
$ docker volume rm $(docker volume ls -q)
$ rm -rf /etc/ceph \
/etc/cni \
/etc/kubernetes \
/opt/cni \
/opt/rke \
/run/secrets/kubernetes.io \
/run/calico \
/run/flannel \
/var/lib/calico \
/var/lib/etcd \
/var/lib/cni \
/var/lib/kubelet \
/var/lib/rancher/rke/log \
/var/log/containers \
/var/log/pods \
/var/run/calico \
/data/docker/volumes/rancher
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /root/.kube/config
WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /root/.kube/config
$ chmod g-rw ~/.kube/config
$ chmod o-r ~/.kube/config
# For Jenkins specifically; ignore if Jenkins is not used
$ chmod -R 600 ~/.kube/config
Rancher installation notes (this writeup uses Ubuntu)
# ubuntu
curl https://releases.rancher.com/install-docker/20.10.sh | sh
# centos
yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum list docker-ce --showduplicates | sort -r
yum install docker-ce-20.10.9 -y
systemctl disable firewalld --now
systemctl enable docker --now
Docker private registry
docker run --name registry -d -p 5000:5000 --restart=always -v /var/lib/registry:/var/lib/registry registry
# Edit the private registry configuration file
cat <<EOF > /etc/docker/daemon.json
{
"registry-mirrors": ["https://t0gam4iy.mirror.aliyuncs.com"],
"insecure-registries": ["192.168.3.101:5000"]
}
EOF
systemctl daemon-reload && systemctl restart docker
# List the registry's images and the tags of a specific image
curl -XGET http://192.168.3.101:5000/v2/_catalog
curl -XGET http://192.168.3.101:5000/v2/rancher/rancher/tags/list
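# Pushing an image into the private registry (tag is illustrative; any local image works)
docker tag rancher/rancher:v2.7.1 192.168.3.101:5000/rancher/rancher:v2.7.1
docker push 192.168.3.101:5000/rancher/rancher:v2.7.1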
Linux kernel parameters
# View all kernel parameters
sysctl -a
# Apply immediately
sysctl -w net.bridge.bridge-nf-call-iptables=1
# Persist across reboots
sysctl -w net.bridge.bridge-nf-call-iptables=1 >> /etc/sysctl.conf
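# Equivalent persistence via a drop-in file (file name is illustrative)
echo "net.bridge.bridge-nf-call-iptables=1" > /etc/sysctl.d/99-kubernetes.conf
sysctl --system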
Notes on using a private registry with Rancher
# The k3s embedded in Rancher uses containerd as its container runtime instead of Docker,
# so the private registry has to be configured for containerd
# Map a registries.yaml configuration file into the container as a volume
cat <<EOF>> registries.yaml
mirrors:
192.168.3.101:5000:
endpoint:
- "http://192.168.3.101:5000"
EOF
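# If the private registry requires authentication, k3s' registries.yaml also accepts
# a configs section (username/password below are placeholders)
cat <<EOF>> registries.yaml
configs:
  "192.168.3.101:5000":
    auth:
      username: <user>
      password: <password>
EOF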
Images required by Rancher
# Download the image list and the save/load scripts for the specific Rancher release
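# The assets referenced below are published with each Rancher GitHub release and can be
# fetched directly, e.g. (version is illustrative):
wget https://github.com/rancher/rancher/releases/download/v2.7.1/rancher-images.txt
wget https://github.com/rancher/rancher/releases/download/v2.7.1/rancher-save-images.sh
wget https://github.com/rancher/rancher/releases/download/v2.7.1/rancher-load-images.sh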
# Install the helm CLI
# Huawei mirror: https://mirrors.huaweicloud.com/helm/
wget https://get.helm.sh/helm-v3.9.2-linux-amd64.tar.gz
# Extract and move helm to /usr/local/bin
tar -zxvf helm-v3.9.2-linux-amd64.tar.gz
mv ./linux-amd64/helm /usr/local/bin
# Add the cert-manager chart repository
helm repo add jetstack https://charts.jetstack.io
# Update the repositories
helm repo update
# Fetch the cert-manager chart
helm fetch jetstack/cert-manager --version v1.7.1
# When using Rancher's default self-signed TLS certificate, the cert-manager images must be added
helm template ./cert-manager-v1.7.1.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt
# Sort and de-duplicate the image list
sort -u rancher-images.txt -o rancher-images.txt
# Make the scripts executable
chmod +x rancher-save-images.sh rancher-load-images.sh
# Create an archive of all required images
./rancher-save-images.sh --image-list ./rancher-images.txt
# Push the images to the private registry
./rancher-load-images.sh --image-list ./rancher-images.txt --registry <REGISTRY.YOURDOMAIN.COM:PORT>
# step 3
mkdir -p rancher/k3s && cd rancher
cat <<EOF>> k3s/registries.yaml
mirrors:
192.168.3.101:5000:
endpoint:
- "http://192.168.3.101:5000"
EOF
docker run --privileged -d --restart=unless-stopped \
-p 81:80 -p 444:443 --name=rancher-server \
-v $(pwd)/rancher:/var/lib/rancher \
-v $(pwd)/k3s/registries.yaml:/etc/rancher/k3s/registries.yaml \
-e CATTLE_SYSTEM_DEFAULT_REGISTRY=192.168.3.101:5000 \
-e CATTLE_BOOTSTRAP_PASSWORD=admin123 \
-e CATTLE_SYSTEM_CATALOG=bundled \
192.168.3.101:5000/rancher/rancher:v2.7.1
# If a custom cluster created from the Rancher UI fails to start, try a lower Kubernetes version (important!)
# The 81->80 and 444->443 mappings are needed to create a single-node cluster (etcd, control plane, worker on one host); without the port mappings the rancher-agent registration fails
Rancher (single node, Aliyun registry)
# For a single Rancher server, an internal access URL can be pinned to avoid problems caused by IP changes
## -e CATTLE_SERVER_URL=https://127.0.0.1:444
docker run --privileged -d --restart=unless-stopped \
-p 81:80 -p 444:443 --name=rancher-server \
-e CATTLE_SYSTEM_DEFAULT_REGISTRY=registry.cn-hangzhou.aliyuncs.com \
-e CATTLE_BOOTSTRAP_PASSWORD=admin123 \
-e CATTLE_SYSTEM_CATALOG=bundled \
registry.cn-hangzhou.aliyuncs.com/rancher/rancher:v2.7.1
# For a single agent node, a local IP/domain can be specified for access
docker run -d --privileged --restart=unless-stopped \
-p 81:80 -p 444:443 rancher/rancher-agent:v2.7.1 \
--server http://hostname:8080/v3 \
--token <token> --ca-checksum <checksum>