Configure a static IP on each server by editing its NIC config (values shown are for node1; change IPADDR on each node):

cd /etc/sysconfig/network-scripts
vim ifcfg-ens33

BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.1.211
GATEWAY=192.168.1.1
NETMASK=255.255.255.0
DNS1=114.114.114.114
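Restarting the network service applies the change; a minimal sketch, assuming CentOS 7 and the ens33 interface used above:

# Apply the new static IP (a reboot also works)
systemctl restart network
# Confirm the interface picked up the address
ip addr show ens33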
Update packages and refresh the yum cache on every node:

yum -y update
yum makecache fast
Install ntpdate, sync the clock, and set the timezone (every node):

yum install -y ntpdate
ntpdate time.windows.com
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
date
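A one-off ntpdate call will drift again over time; a minimal sketch of a recurring resync via cron (the hourly schedule and ntpdate path are assumptions):

# Append an hourly time resync to root's crontab
(crontab -l 2>/dev/null; echo "0 * * * * /usr/sbin/ntpdate time.windows.com") | crontab -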
Turn off the firewall on every node:

systemctl stop firewalld
systemctl disable firewalld

Disable SELinux by editing its config file:

vim /etc/selinux/config

Set the SELINUX value to disabled:

SELINUX=disabled
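The same edit can be done non-interactively; a sketch that also turns SELinux off for the current boot (it assumes the file currently reads SELINUX=enforcing):

# Persist the change across reboots
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
# Switch to permissive mode immediately, no reboot needed
setenforce 0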
Map the three hostnames on every node:

vim /etc/hosts

192.168.1.211 node1
192.168.1.212 node2
192.168.1.213 node3
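The steps below assume each machine's own hostname matches its /etc/hosts entry; if yours differ, set them first (a sketch, run the matching line on each server):

hostnamectl set-hostname node1   # on 192.168.1.211
hostnamectl set-hostname node2   # on 192.168.1.212
hostnamectl set-hostname node3   # on 192.168.1.213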
5 Set up passwordless SSH between the three servers (be sure to exit the shell and reconnect once this is done)

1. First, run this command on all servers:
ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa

2. Then run this on all servers, so each machine can SSH to itself without a password:
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys

3. Next, copy each server's id_dsa.pub public key into the /tmp folder of the other machines; for example, on the master run:
scp ~/.ssh/id_dsa.pub node2:/tmp/
scp ~/.ssh/id_dsa.pub node3:/tmp/

4. Then, on those other machines, append the received public key to their own authorized_keys:
cat /tmp/id_dsa.pub >> ~/.ssh/authorized_keys

5. In the same way, send the public key of every other machine to the remaining servers and append it to their authorized_keys (a loop-based shortcut is sketched after step 6).

6. Finally, test the passwordless connection:
ssh node1
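Steps 3-5 can be collapsed with ssh-copy-id, which appends the key on the remote side in one command; a sketch, assuming root password login still works at this point:

# Run on every server after its key exists (step 1)
for host in node1 node2 node3; do
  ssh-copy-id -i ~/.ssh/id_dsa.pub root@$host
done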
6 Install the dependencies KubeSphere needs on every node, otherwise the install fails with the error: socat not found in system path

yum install -y socat conntrack ebtables ipset
Set up NFS for shared storage.

# On every machine:
yum install -y nfs-utils

# On the master, run:
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports

# Create the shared directory and start the NFS service.
mkdir -p /nfs/data

# On the master:
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server

# Make the export configuration take effect:
exportfs -r

# Check that it took effect:
exportfs

# Replace with your own master's IP; run only on the worker nodes:
showmount -e 192.168.1.211

mkdir -p /nfs/data

mount -t nfs 192.168.1.211:/nfs/data /nfs/data
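A quick end-to-end check of the mount; a sketch, the test filename is arbitrary:

# On a worker node: write through the NFS mount
echo "nfs ok" > /nfs/data/nfs-test.txt
# On the master: the file should appear in the exported directory
cat /nfs/data/nfs-test.txt
# Clean up
rm -f /nfs/data/nfs-test.txt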
## Create a StorageClass (save the whole manifest below as sc.yaml)
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"  ## whether to keep a backup of the PV's contents when the PV is deleted

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
          # resources:
          #   limits:
          #     cpu: 10m
          #   requests:
          #     cpu: 10m
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.1.211  ## your own NFS server address
            - name: NFS_PATH
              value: /nfs/data  ## the directory exported by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.211
            path: /nfs/data
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
# Apply it:
kubectl apply -f sc.yaml

# Confirm the StorageClass is in place:
kubectl get sc
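To verify that dynamic provisioning actually works end to end, you can create a throwaway PVC and check that a PV gets bound automatically; a minimal sketch, the claim name and size here are arbitrary:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc-test
spec:
  storageClassName: nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 200Mi

After applying this file with kubectl apply -f, kubectl get pvc nfs-pvc-test should show STATUS Bound within seconds; delete the claim once you have seen it bind.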
8 Download the k8s install script, only in the master node's (k8s-node1) folder; without a registry mirror configured this may be slow, so retry a few times

export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh -
chmod +x kk
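A quick sanity check that the binary downloaded intact, using kk's version subcommand (the output format may vary by release):

# Should print the KubeKey version, e.g. v1.1.1
./kk version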
9 Cluster configuration: create the config file config-sample.yaml

./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1

This writes config-sample.yaml to the current directory; edit it to match the cluster layout below.
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: node1, address: 192.168.1.211, internalAddress: 192.168.1.211, user: root, password: "root"}
  - {name: node2, address: 192.168.1.212, internalAddress: 192.168.1.212, user: root, password: "root"}
  - {name: node3, address: 192.168.1.213, internalAddress: 192.168.1.213, user: root, password: "root"}
  roleGroups:
    etcd:
    - node1
    master:
    - node1
    worker:
    - node2
    - node3
  controlPlaneEndpoint:
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.20.4
    imageRepo: kubesphere
    clusterName: cluster.local
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
  registry:
    registryMirrors: ["https://o83laiga.mirror.aliyuncs.com"]
    insecureRegistries: []
  addons: []
Create the cluster from the config file:

./kk create cluster -f config-sample.yaml
12 Wait patiently for the installation to finish; all worker nodes will be added to k8s-node1 (roughly 5-10 minutes). Follow the installer log with:

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -f
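When installation completes, the log ends with a banner along these lines (console address, account, and password shown are KubeSphere v3.1.x defaults; treat the exact text as indicative):

#####################################################
###              Welcome to KubeSphere!           ###
#####################################################
Console: http://192.168.1.211:30880
Account: admin
Password: P@88w0rd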
kubectl get nodes
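All three nodes should report Ready, roughly like the following (ages will differ; KubeKey labels the workers with the worker role):

NAME    STATUS   ROLES                  AGE   VERSION
node1   Ready    control-plane,master   10m   v1.20.4
node2   Ready    worker                 9m    v1.20.4
node3   Ready    worker                 9m    v1.20.4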
Edit the Docker daemon file (/etc/docker/daemon.json) to use the registry mirror and the systemd cgroup driver:

{
  "registry-mirrors": ["https://o83laiga.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
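Docker only reads daemon.json at startup, so reload and restart it for the change to take effect (note this restarts every container on the node):

systemctl daemon-reload
systemctl restart docker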
To tear the cluster down:

./kk delete cluster

Advanced mode, deleting against a specific config file:

./kk delete cluster [-f config-sample.yaml]