
k8s environment setup

Docker Swarm was set up earlier and needs to be removed first.

[root@server1 ~]# docker node ls
ID                            HOSTNAME              STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
39zp7zw06n1rslzvtsxviqjnq *   server1.example.com   Ready               Active              Leader              18.06.1-ce
hb4j7x1yu4a40ahtcocn5w7li     server2               Ready               Active                                  18.06.1-ce
tzhpszakws2hkgwplg9w6cvlw     server3               Ready               Active                                  18.06.1-ce


Make sure the Docker engine versions are consistent across the nodes, then leave the swarm:

docker swarm leave --force

docker container prune
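To confirm the node actually left the swarm, a quick check (the Go-template output of docker info should print "inactive" once the node is standalone again):

docker info --format '{{.Swarm.LocalNodeState}}'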

The Kubernetes version used here is v1.12.2 (see the RPMs below).

Disable the swap partition and use memory directly (by default the kubelet refuses to start while swap is enabled).

 

On server1, server2, and server3 the following packages and image archives are prepared:

coredns.tar                    kubeadm-1.12.2-0.x86_64.rpm  kubernetes-cni-0.6.0-0.x86_64.rpm
cri-tools-1.12.0-0.x86_64.rpm  kube-apiserver.tar           kubernetes-dashboard.tar
etcd.tar                       kube-controller-manager.tar  kubernetes-dashboard.yaml
flannel.tar                    kubectl-1.12.2-0.x86_64.rpm  kube-scheduler.tar
heapster-grafana.tar           kube-flannel.yml             pause.tar
heapster-influxdb.tar          kubelet-1.12.2-0.x86_64.rpm  scope.tar
heapster.tar                   kube-proxy.tar               scope.yaml

 

swapoff -a

vim /etc/fstab   # remove or comment out the swap entry
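The same change can be made non-interactively (a sketch; it assumes the swap entry is an ordinary fstab line containing the word "swap"):

sed -i '/\sswap\s/ s/^/#/' /etc/fstab   # comment out the swap line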

systemctl enable kubelet.service

systemctl enable docker

kubeadm config images list

Make sure the local image versions match what kubeadm expects.

docker load -i kube-apiserver.tar   (and the other tar archives)

The network component (flannel) requires the flannel tar archive as well:

#!/bin/bash
docker load -i coredns.tar
docker load -i kube-apiserver.tar
docker load -i kubernetes-dashboard.tar
docker load -i etcd.tar
docker load -i kube-controller-manager.tar
docker load -i flannel.tar
docker load -i kube-scheduler.tar
docker load -i heapster-grafana.tar
docker load -i pause.tar
docker load -i heapster-influxdb.tar
docker load -i scope.tar
docker load -i heapster.tar
docker load -i kube-proxy.tar
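
An equivalent shortcut, assuming every required archive sits in the current directory as a .tar file:

for t in *.tar; do docker load -i "$t"; done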


If swap stays enabled, edit /etc/sysconfig/kubelet and pass the kubelet the flag:

--fail-swap-on=false
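
A sketch of the edit, assuming the stock file shipped by the kubelet RPM with its single KUBELET_EXTRA_ARGS= line:

KUBELET_EXTRA_ARGS=--fail-swap-on=false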

[root@server1 k8s]# rpm -ql kubelet
/etc/kubernetes/manifests
/etc/sysconfig/kubelet
/etc/systemd/system/kubelet.service
/usr/bin/kubelet

kubeadm reset   # reset the node if a previous init/join attempt failed

On server1:

kubeadm  init --pod-network-cidr=10.244.0.0/16  --apiserver-advertise-address=172.25.11.1

If swap is still enabled, append --ignore-preflight-errors=Swap:

kubeadm  init --pod-network-cidr=10.244.0.0/16  --apiserver-advertise-address=172.25.11.1    --ignore-preflight-errors=Swap

useradd k8s

vim /etc/sudoers

k8s  ALL=(ALL)      NOPASSWD:ALL

su - k8s

mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config

echo "source <(kubectl   competition bash)" >> ./.bashrc

cp  *.yml /home/k8s/

kubectl apply -f kube-flannel.yml

As root, make the yml file readable by the k8s user:

chmod 777 kube-flannel.yml

 

On server2 and server3:

swapon -s   # should print nothing once swap is disabled

Use the join command printed by kubeadm init earlier:

kubeadm join 172.25.11.1:6443 --token k8luh5.ch5f4gq4f8tnrnv4 --discovery-token-ca-cert-hash sha256:b02e1480a787d59f5b0b2615b85201913e718de273925b04f788c61a3ab0295d
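
If the token has expired by the time server2/server3 join, a new join command can be printed on server1 (a side note, not part of the original steps):

kubeadm token create --print-join-command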

Load the two IPVS kernel modules:

modprobe ip_vs_wrr

modprobe  ip_vs_sh
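
To keep the two modules loaded after a reboot, they can be listed for systemd-modules-load; the file name ipvs.conf below is just a placeholder:

cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs_wrr
ip_vs_sh
EOF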

 

On server1:

kubectl get nodes

kubectl get pod --all-namespaces

If any pod is not in the Running state:

kubectl delete pod ....  -n kube-system   # the pod is recreated and restarted automatically
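
A quick way to list only the problem pods (plain grep over the listing, nothing k8s-specific):

kubectl get pod --all-namespaces | grep -v Running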

 

Because the cluster admin config was set up for the k8s user, use the k8s user to query the cluster:

[k8s@server1 ~]$ kubectl get nodes
NAME                  STATUS   ROLES    AGE     VERSION
server1.example.com   Ready    master   5m46s   v1.12.2
server2               Ready    <none>   5m23s   v1.12.2
server3               Ready    <none>   5m13s   v1.12.2
[k8s@server1 ~]$ kubectl get pod --all-namespaces
NAMESPACE     NAME                                          READY   STATUS             RESTARTS   AGE
kube-system   coredns-576cbf47c7-2mg4d                      0/1     CrashLoopBackOff   4          5m42s
kube-system   coredns-576cbf47c7-dt2f4                      0/1     CrashLoopBackOff   4          5m41s
kube-system   etcd-server1.example.com                      1/1     Running            0          5m9s
kube-system   kube-apiserver-server1.example.com            1/1     Running            0          5m18s
kube-system   kube-controller-manager-server1.example.com   1/1     Running            0          5m17s
kube-system   kube-flannel-ds-amd64-2l8wn                   1/1     Running            0          2m58s
kube-system   kube-flannel-ds-amd64-5f8z9                   1/1     Running            0          2m58s
kube-system   kube-flannel-ds-amd64-nld6s                   1/1     Running            0          2m58s
kube-system   kube-proxy-kll8q                              1/1     Running            0          5m42s
kube-system   kube-proxy-ks89p                              1/1     Running            0          5m39s
kube-system   kube-proxy-vklq4                              1/1     Running            0          5m28s
kube-system   kube-scheduler-server1.example.com            1/1     Running            0          5m18s

[k8s@server1 ~]$ kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok                   
scheduler            Healthy   ok                   
etcd-0               Healthy   {"health": "true"}

If a pod is not Running (e.g. the coredns pods above):

kubectl delete pod  coredns-576cbf47c7-2mg4d  -n kube-system

Check again:

kubectl get pod --all-namespaces

The node needs network connectivity before these pods will reach the Running state.
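
To watch them transition once connectivity is there (optional):

kubectl get pod -n kube-system -w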

 

 

 

 

 

 

 

 
