
Installing Kubernetes 1.20.4 and KubeSphere 3.1.1 on Ubuntu 22.04 with KubeKey (kk)

Note

  • Insufficient disk space can cause the installation to fail (a quick check follows below).
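
A quick way to verify free space before starting; the commonly cited minimum for KubeSphere is on the order of 40 GB of disk per node, so treat the exact figure as a guideline:

# run on every node; the root filesystem holds the container images and etcd data
df -h /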

Environment

  • master 192.168.1.108
  • node1 192.168.1.106
  • node2 192.168.1.102

Root SSH login

sudo passwd root
sudo apt install openssh-server
# In vim, search for PermitRootLogin (/PermitRootLogin) and add the line: PermitRootLogin yes
# Comment out the "PermitRootLogin prohibit-password" and "StrictModes yes" lines
sudo vim /etc/ssh/sshd_config
sudo service ssh restart
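
Before running kk, it is worth confirming that root SSH login actually works from the master to every node (the IPs are the example hosts from the Environment section):

# run on master; each command should print the remote hostname after the root password is entered
ssh root@192.168.1.106 hostname
ssh root@192.168.1.102 hostname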

Hostnames

master

hostnamectl set-hostname master

node1

hostnamectl set-hostname node1

node2

hostnamectl set-hostname node2
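
Each node should now report its new name:

hostnamectl --static   # prints master, node1 or node2 depending on the host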

kk

master

export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh -
chmod +x kk

# On success this generates config-sample.yaml in the current directory
./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1
# Required edits are described below
vim config-sample.yaml
# Start the installation
./kk create cluster -f config-sample.yaml
# Follow the installation progress
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
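
Once the installer finishes, a quick sanity check that all three nodes joined the cluster:

# all three nodes should eventually show STATUS "Ready"
kubectl get nodes -o wide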

Changes to make in config-sample.yaml

spec:
  hosts:  # name is the node's hostname; set address and internalAddress to its IP, user to the SSH user, password to that user's password
  - {name: master, address: 192.168.1.108, internalAddress: 192.168.1.108, user: root, password: 12345678}
  - {name: node1, address: 192.168.1.106, internalAddress: 192.168.1.106, user: root, password: 12345678}
  - {name: node2, address: 192.168.1.102, internalAddress: 192.168.1.102, user: root, password: 12345678}
  roleGroups:
    etcd:    # etcd runs on the master
    - master
    master:  # control-plane node, by hostname
    - master
    worker:  # worker nodes, by hostname
    - node1
    - node2
# App Store: search for "openpitrix" and set enabled to true
openpitrix:
  store:
    enabled: true
# DevOps: search for "devops" and set enabled to true
devops:
  enabled: true
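
If a pluggable component such as DevOps or the App Store was left disabled at install time, KubeSphere 3.x also documents enabling it after installation by editing the installer's ClusterConfiguration:

# flip the component's "enabled" field to true and save; ks-installer reconciles the change
kubectl -n kubesphere-system edit clusterconfiguration ks-installer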

Output when the installation completes
Console: http://192.168.1.108:30880
Account: admin
Password: P@88w0rd

# Wait until all pods are Running
kubectl get pod -A
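
To follow the pods without rerunning the command by hand:

kubectl get pod -A --watch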

Open http://192.168.1.108:30880 in a browser.

If you need NFS storage, continue with the steps below.

nfs

master

sudo apt install nfs-kernel-server
# To uninstall later: apt remove nfs-kernel-server
# To unmount on the nodes later: umount -f -l /nfs/data
# Create the NFS shared directory
mkdir -p /nfs/data
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
# Start now and enable at boot
systemctl enable rpcbind --now
systemctl enable nfs-server --now
# Apply the export configuration
exportfs -r
# Verify the exports
exportfs
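
The showmount and mount commands run on the nodes below require the NFS client utilities, which a stock Ubuntu 22.04 server may not have installed (an assumption about the base image):

# run on node1 and node2 if showmount / mount.nfs are missing
sudo apt install nfs-common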

node1

showmount -e 192.168.1.108
mkdir -p /nfs/data
mount -t nfs 192.168.1.108:/nfs/data /nfs/data

node2

showmount -e 192.168.1.108
mkdir -p /nfs/data
mount -t nfs 192.168.1.108:/nfs/data /nfs/data
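
These mounts do not survive a reboot. One way to persist them, sketched with the same server address and path, is an /etc/fstab entry on each node:

# append on node1 and node2, then remount from fstab to verify the entry
echo "192.168.1.108:/nfs/data /nfs/data nfs defaults 0 0" >> /etc/fstab
mount -a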

Default StorageClass

master

vim nfs-storage.yaml
kubectl apply -f nfs-storage.yaml

nfs-storage.yaml

  • Note: change the two IP addresses below to your own NFS server address
## Creates a StorageClass and marks it as the default
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"  ## whether to archive the PV's contents when the PV is deleted

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
          # resources:
          #    limits:
          #      cpu: 10m
          #    requests:
          #      cpu: 10m
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.1.108 ## your NFS server address
            - name: NFS_PATH
              value: /nfs/data  ## the directory exported by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.108
            path: /nfs/data
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
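
After applying the manifest, the StorageClass should appear as the default and the provisioner pod should be Running. A throwaway PVC (the name test-pvc is only illustrative) confirms that dynamic provisioning works end to end:

kubectl get sc                                   # nfs-storage should be marked (default)
kubectl get pod -l app=nfs-client-provisioner    # should be Running

# create a small claim against the default StorageClass; it should become Bound
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 100Mi
EOF
kubectl get pvc test-pvc

# clean up when done
kubectl delete pvc test-pvc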

