赞
踩
1、概念
StorageClass是一个存储类,通过创建StorageClass可以动态生成一个存储卷,供k8s用户使用。
使用StorageClass可以根据PVC动态的创建PV,减少管理员手工创建PV的工作。
StorageClass的定义主要包括名称、后端存储的提供者(provisioner)和后端存储的相关参数配置。StorageClass一旦被创建,就无法修改,如需修改,只能删除重建。
2、创建
要使用 StorageClass,我们就得安装对应的自动配置程序,比如本篇文章使用的存储后端是 nfs,那么我们就需要使用到一个 NFS-Subdir-External-Provisioner 的自动配置程序,我们也叫它 Provisioner,
教程:https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner,这个程序使用我们已经配置好的 nfs 服务器,来自动创建持久卷,也就是自动帮我们创建 PV。
自动创建的 PV 以{namespace}-{pvcName}-{pvName} 这样的命名
格式创建在 NFS 服务器上的共享数据目录中
当这个 PV 被回收后会以 archived-{namespace}-{pvcName}-{pvName} 这样的命名格式存在 NFS 服务器上。
在部署NFS-Subdir-External-Provisioner 之前,我们需要先成功安装上 nfs 服务器,安装方法
1、NFS 介绍
NFS 即网络文件系统(Network File-System),可以通过网络让不同机器、不同系统之间可以实现文件共享。通过 NFS,可以访问远程共享目录,就像访问本地磁盘一样。NFS 只是一种文件系统,本身并没有传输功能,是基于 RPC(远程过程调用)协议实现的,采用 C/S 架构。
2、安装 NFS 软件包
- sudo apt-get install nfs-kernel-server # 安装 NFS服务器端
- sudo apt-get install nfs-common # 安装 NFS客户端
3、添加 NFS 共享目录
“/nfs/data” 目录设置为 NFS 共享目录,
- cat /etc/exports
-
- /nfs/data *(rw,sync,no_root_squash) # * 表示允许任何网段 IP 的系统访问该 NFS 目录
新建“/nfs/data”目录,并为该目录设置最宽松的权限:
sudo mkdir -p /nfs/data
sudo chmod -R 777 /nfs/data
sudo chown soft:soft /nfs/data -R # soft为当前用户,-R 表示递归更改该目录下所有文件
启动nfs服务器
sudo systemctl start nfs-kernel-server.service 或者
sudo /etc/init.d/nfs-kernel-server start
# 可用 systemctl status nfs-kernel-server.service 查看服务状态
验证nfs
- root@3nd1012:~# showmount -e
- Export list for 3nd1012:
- /nfs/data *
-
- root@1nd1009:~/nfs# showmount -e 192.168.10.12
- Export list for 192.168.10.12:
- /nfs/data *
-
root@1nd1009:~/nfs# cat serviceAccount.yaml
# rbac.yaml: the only field that usually needs changing is the namespace —
# adjust it to match where the provisioner will be deployed.
apiVersion: v1
kind: ServiceAccount  # identity the NFS provisioner pod runs as inside the cluster
metadata:
  name: nfs-client-provisioner
  namespace: default
---
kind: ClusterRole  # cluster-wide permissions for the provisioner
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]  # the provisioner creates/deletes PVs dynamically
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]  # it watches PVCs and binds them
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]  # so provisioning status shows up in `kubectl describe`
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding  # grant the ClusterRole to the ServiceAccount
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role  # namespaced role used for leader election between provisioner replicas
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: default  # a Role is namespaced; a ClusterRole is not
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding  # grant the leader-election Role to the ServiceAccount
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: default  # added: the original omitted the namespace on this RoleBinding
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
root@1nd1009:~/nfs# cat provisioner.yaml
# Deploy the NFS client provisioner.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: default  # must match the namespace used in the RBAC manifests
spec:
  replicas: 1
  strategy:
    type: Recreate  # kill the old pod before starting a new one — avoids two provisioners running at once
  selector:  # fixed: the original declared `selector` twice (duplicate mapping key, invalid YAML)
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner  # the ServiceAccount created in the RBAC manifest
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0
          imagePullPolicy: IfNotPresent
          volumeMounts:  # mount the NFS volume into the container
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME  # must equal the `provisioner` field of the StorageClass
              value: qgg-nfs-storage
            - name: NFS_SERVER  # address of the NFS server
              value: 192.168.10.12
            - name: NFS_PATH  # exported directory on the NFS server
              value: /nfs/data/
      volumes:  # declare the NFS-backed volume
        - name: nfs-client-root
          nfs:
            server: 192.168.10.12
            path: /nfs/data/
4.创建 NFS StorageClass
我们在创建 PVC 时经常需要指定 storageClassName 名称,这个参数配置的就是一个 StorageClass 资源名称,PVC 通过指定该参数来选择使用哪个 StorageClass,并与其关联的 Provisioner 组件来动态创建 PV 资源。所以,这里我们需要提前创建一个 StorageClass 资源。
root@1nd1009:~/nfs# cat storageClass.yaml
# StorageClass backed by the NFS provisioner.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: qgg-nfs-storage  # must equal the PROVISIONER_NAME env var of the provisioner Deployment
parameters:
  # "false": data directory is removed when the PVC is deleted;
  # "true": it is kept, renamed with an "archived-" prefix.
  archiveOnDelete: "false"
创建pod,申明PVC进行测试
root@1nd1009:~/nfs# cat pod-pvc.yaml
# Declare a PVC that selects the StorageClass via annotation.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    # Older API versions select the storage class through this annotation
    # instead of spec.storageClassName. The value is the name of the
    # StorageClass created above; it will provision the PV for us.
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1024Mi
---
# Test pod: verifies the PVC can be mounted.
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod
      image: busybox:1.24
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"  # create a SUCCESS marker file, then exit
      volumeMounts:
        - name: nfs-pvc  # mount the data volume
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:  # the volume is backed by the PVC
        claimName: test-claim  # must match the PVC name above
如果我们使用手动指定pvc的方式,当我们pod为多实例的时候,多个pod使用的是一个pvc。但是我们使用statefulset时,storageclass会为我们每一个Pod创建一个pv和pvc
root@1nd1009:~/nfs# cat test-StatefulSet.yaml
# Headless service required by the StatefulSet for stable network identities.
apiVersion: v1
kind: Service
metadata:
  name: headless-svc
  labels:
    app: headless-svc
spec:
  ports:
    - name: myweb
      port: 80
  selector:
    app: headless-pod
  clusterIP: None  # headless: no cluster IP, DNS resolves to the pods directly
---
# fixed: the `---` document separator above was missing in the original,
# which made Service and StatefulSet one invalid YAML document.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: statefulset
spec:
  serviceName: headless-svc
  replicas: 3
  selector:
    matchLabels:
      app: headless-pod
  template:
    metadata:
      labels:
        app: headless-pod
    spec:
      containers:
        - name: myweb
          image: nginx
          volumeMounts:
            - name: test-storage
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:  # one PVC per pod is generated from this template
    - metadata:
        name: test-storage
        annotations:
          volume.beta.kubernetes.io/storage-class: managed-nfs-storage  # only the StorageClass is specified
      spec:
        accessModes:
          - ReadWriteOnce  # access mode
        resources:
          requests:
            storage: 10Mi  # requested capacity
//写完之后,直接运行,并且,在此之前,我们并没有创建PV,PVC,现在查看集群中的资源,是否有这两种资源?
运行statefulset.yaml文件
接下来我们创建查看pv和pvc并且查看一下nfs挂载目录的文件
- root@1nd1009:~/nfs# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- nfs-client-provisioner-c6577876d-wkz4t 1/1 Running 1 (<invalid> ago) 63m
- nginx-56469d8695-lvnt4 1/1 Running 0 20h
- statefulset-0 1/1 Running 0 96s
- statefulset-1 1/1 Running 0 48s
- statefulset-2 0/1 ContainerCreating 0 41s
-
- root@1nd1009:~/nfs# kubectl get pvc
- NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
- test-claim Bound pvc-857fa01b-7340-4b1c-a0ff-005f394f8034 1Gi RWX managed-nfs-storage 56m
- test-storage-statefulset-0 Bound pvc-550be11e-80fb-4bc7-9a89-6167d138aab0 10Mi RWO managed-nfs-storage 2m41s
- test-storage-statefulset-1 Bound pvc-4d2ba435-add3-4ff5-9c46-b975e2ab1c7e 10Mi RWO managed-nfs-storage 113s
- test-storage-statefulset-2 Bound pvc-0d9e2c88-ad5f-4108-9c6b-4b9921b760c6 10Mi RWO managed-nfs-storage 106s
- root@1nd1009:~/nfs# kubectl get pv
- NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
- pvc-0d9e2c88-ad5f-4108-9c6b-4b9921b760c6 10Mi RWO Delete Bound default/test-storage-statefulset-2 managed-nfs-storage 112s
- pvc-4d2ba435-add3-4ff5-9c46-b975e2ab1c7e 10Mi RWO Delete Bound default/test-storage-statefulset-1 managed-nfs-storage 119s
- pvc-550be11e-80fb-4bc7-9a89-6167d138aab0 10Mi RWO Delete Bound default/test-storage-statefulset-0 managed-nfs-storage 2m47s
- pvc-857fa01b-7340-4b1c-a0ff-005f394f8034 1Gi RWX Delete Bound default/test-claim managed-nfs-storage 56m
- pvc-bc7a753e-c03c-4db6-a649-285c2509170e 1Gi RWO Delete Released dev/storage-pvc nfs-storage 124m
-
从上述结果中,我们知道,storageclass为我们自动创建了PV,volumeClaimTemplate为我们自动创建PVC,但是否能够满足我们所说的,每一个Pod都有自己独有的数据持久化目录,也就是说,每一个Pod内的数据都是不一样的。
分别在对应的PV下,模拟创建不同的数据。
-
- root@1nd1009:~/nfs# kubectl exec -it statefulset-0 bash
- statefulset-0
- root@1nd1009:~/nfs# kubectl exec -it statefulset-0 bash
- kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
- root@statefulset-0:/# ls
- bin dev docker-entrypoint.sh home lib64 mnt proc run srv tmp var
- boot docker-entrypoint.d etc lib media opt root sbin sys usr
- root@statefulset-0:/# echo 00000 > /usr/share/nginx/html/index.html
- root@statefulset-0:/# cat /usr/share/nginx/html/index.html
- 00000
- root@statefulset-0:/# exit
- exit
- root@1nd1009:~/nfs# kubectl exec -it statefulset-1 bash
- kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
- root@statefulset-1:/# echo 1111 > /usr/share/nginx/html/index.html
- root@statefulset-1:/# exit
- exit
- root@1nd1009:~/nfs# kubectl exec -it statefulset-2 bash
- kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
- root@statefulset-2:/# echo 2222 > /usr/share/nginx/html/index.html
- root@statefulset-2:/# exit
- exit
-
回到nfs服务器查看存储目录下对应Pod的数据持久化目录,可以看出,每个Pod的内容都不一样
- root@3nd1012:~# cd /nfs/data/
- root@3nd1012:/nfs/data# ls
- default-test-claim-pvc-857fa01b-7340-4b1c-a0ff-005f394f8034
- default-test-storage-statefulset-0-pvc-550be11e-80fb-4bc7-9a89-6167d138aab0
- default-test-storage-statefulset-1-pvc-4d2ba435-add3-4ff5-9c46-b975e2ab1c7e
- default-test-storage-statefulset-2-pvc-0d9e2c88-ad5f-4108-9c6b-4b9921b760c6
- root@3nd1012:/nfs/data# cd default-test-storage-statefulset-0-pvc-550be11e-80fb-4bc7-9a89-6167d138aab0/
- root@3nd1012:/nfs/data/default-test-storage-statefulset-0-pvc-550be11e-80fb-4bc7-9a89-6167d138aab0# ls
- index.html
- root@3nd1012:/nfs/data/default-test-storage-statefulset-0-pvc-550be11e-80fb-4bc7-9a89-6167d138aab0# cat index.html
- 00000
- root@3nd1012:/nfs/data/default-test-storage-statefulset-0-pvc-550be11e-80fb-4bc7-9a89-6167d138aab0# cd ../
- root@3nd1012:/nfs/data# cat default-test-storage-statefulset-1-pvc-4d2ba435-add3-4ff5-9c46-b975e2ab1c7e/index.html
- 1111
- root@3nd1012:/nfs/data# cat default-test-storage-statefulset-2-pvc-0d9e2c88-ad5f-4108-9c6b-4b9921b760c6/index.html
- 2222
即使删除Pod,然后statefulSet这个Pod控制器会生成一个新的Pod,这里不看Pod的IP,名称肯定和之前的一致,而且,最主要是持久化的数据仍然存在。
- root@1nd1009:~# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- statefulset-1 1/1 Running 0 52m 10.233.198.224 3nd1012 <none> <none>
- statefulset-2 1/1 Running 0 52m 10.233.165.5 1nd1009 <none> <none>
-
- root@1nd1009:~# kubectl delete pod statefulset-2
- pod "statefulset-2" deleted
- root@1nd1009:~# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- statefulset-1 1/1 Running 0 53m 10.233.198.224 3nd1012 <none> <none>
- statefulset-2 0/1 ContainerCreating 0
- root@1nd1009:~# kubectl get pod -o wide
- statefulset-1 1/1 Running 0 53m 10.233.198.224 3nd1012 <none> <none>
- statefulset-2 1/1 Running 0 27s 10.233.165.6 1nd1009 <none> <none>
-
- root@1nd1009:~# curl 10.233.165.6
- 2222
- root@1nd1009:~#
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。