# Create the RBD storage pool
# Specify pg_num and pgp_num when creating the pool. pgp_num controls how PG
# data is grouped for placement and is normally set equal to pg_num.
ceph osd pool create kubernetes 128 128
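# Optional check: confirm the pool exists and that pg_num/pgp_num took the requested values
ceph osd pool get kubernetes pg_num
ceph osd pool get kubernetes pgp_num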
# Enable the RBD application on the pool
ceph osd pool application enable kubernetes rbd
# Initialize the pool with the rbd command
rbd pool init -p kubernetes
# Create the user
# List the pools first
ceph osd pool ls
# Show the admin key
ceph auth get client.admin 2>&1 | grep "key = " | awk '{print $3}'
# Create the Kubernetes client user on Ceph together with its pool-access key.
# The rbd profiles grant exactly the mon/osd/mgr caps the CSI driver needs;
# do not pre-create the user with different caps first, or get-or-create will
# fail with a caps mismatch.
ceph auth get-or-create client.kubernetes mon 'profile rbd' osd 'profile rbd pool=kubernetes' mgr 'profile rbd pool=kubernetes'
# Get the Ceph cluster info (fsid and monitor addresses)
ceph mon dump
#######################################################################
fsid 83baa63b-c421-480a-be24-0e2c59a70e17
min_mon_release 15 (octopus)
0: [v2:192.168.100.201:3300/0,v1:192.168.100.201:6789/0] mon.vm-201
1: [v2:192.168.100.202:3300/0,v1:192.168.100.202:6789/0] mon.vm-202
2: [v2:192.168.100.203:3300/0,v1:192.168.100.203:6789/0] mon.vm-203
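# The fsid above is the clusterID the CSI ConfigMap needs later;
# `ceph fsid` prints just that value, which is handy for scripting.
ceph fsid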
# Check the created user
ceph auth get client.kubernetes
#######################################################################
exported keyring for client.kubernetes
[client.kubernetes]
key = AQD7QJxhQ4xJARAAHbBdXZ43xxSiTRscbynLWA==
caps mgr = "profile rbd pool=kubernetes"
caps mon = "profile rbd"
caps osd = "profile rbd pool=kubernetes"
#######################################################################
# Install python3 (cephadm is a python3 script)
yum install python3 -y
# Download cephadm
wget https://github.com/ceph/ceph/raw/v15.2.17/src/cephadm/cephadm
# Make it executable
chmod +x cephadm
# Add the package repo for the Ceph 15 (Octopus) release
./cephadm add-repo --release 15.2.17
# Install ceph-common
yum install ceph-common -y
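# Optional sketch, assuming this node can ssh to a mon node (host below is an
# assumption; adjust): copy the cluster config and admin keyring so that ceph
# commands work from here, then verify connectivity.
scp root@192.168.100.201:/etc/ceph/ceph.conf /etc/ceph/
scp root@192.168.100.201:/etc/ceph/ceph.client.admin.keyring /etc/ceph/
ceph -s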
cat > /root/1-csi-config-map.yaml << EOF
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "83baa63b-c421-480a-be24-0e2c59a70e17",
        "monitors": [
          "192.168.100.201:6789",
          "192.168.100.202:6789",
          "192.168.100.203:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
EOF
kubectl apply -f /root/1-csi-config-map.yaml
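# Quick check that the ConfigMap landed with the expected clusterID and monitors
kubectl get configmap ceph-csi-config -o yaml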
cat > /root/2-csi-rbd-secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
stringData:
  userID: kubernetes
  userKey: AQD7QJxhQ4xJARAAHbBdXZ43xxSiTRscbynLWA==
EOF
kubectl apply -f /root/2-csi-rbd-secret.yaml
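# Quick check: stringData fields are stored base64-encoded; this should print "kubernetes"
kubectl get secret csi-rbd-secret -o jsonpath='{.data.userID}' | base64 -d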
cat > /root/3-csi-provisioner-rbac.yaml << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: rbd-external-provisioner-cfg
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch", "create", "update", "delete"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: rbd-external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io
EOF
cat > /root/4-csi-nodeplugin-rbac.yaml << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-nodeplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get"]
  # allow to read Vault Token and connection options from the Tenants namespace
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
subjects:
  - kind: ServiceAccount
    name: rbd-csi-nodeplugin
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-csi-nodeplugin
  apiGroup: rbac.authorization.k8s.io
EOF
# quote the heredoc delimiter so $(...) in the manifest is not expanded by the shell
cat > /root/5-csi-rbdplugin.yaml << 'EOF'
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-rbdplugin
spec:
  selector:
    matchLabels:
      app: csi-rbdplugin
  template:
    metadata:
      labels:
        app: csi-rbdplugin
    spec:
      serviceAccountName: rbd-csi-nodeplugin
      hostNetwork: true
      hostPID: true
      priorityClassName: system-node-critical
      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
      # resolved through k8s service, set dns policy to cluster first
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
          # This is necessary only for systems with SELinux, where
          # non-privileged sidecar containers cannot access unix domain socket
          # created by privileged CSI driver container.
          securityContext:
            privileged: true
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
          imagePullPolicy: "IfNotPresent"
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock"
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: csi-rbdplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          # for stable functionality replace canary with latest release version
          image: quay.io/cephcsi/cephcsi:v3.4.0
          args:
            - "--nodeid=$(NODE_ID)"
            - "--pluginpath=/var/lib/kubelet/plugins"
            - "--stagingpath=/var/lib/kubelet/plugins/kubernetes.io/csi/pv/"
            - "--type=rbd"
            - "--nodeserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--enableprofiling=false"
            # If topology based provisioning is desired, configure required
            # node labels representing the nodes topology domain
            # and pass the label names below, for CSI to consume and advertise
            # its equivalent topology domain
            # - "--domainlabels=failure-domain/region,failure-domain/zone"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # - name: POD_NAMESPACE
            #   valueFrom:
            #     fieldRef:
            #       fieldPath: spec.namespace
            # - name: KMS_CONFIGMAP_NAME
            #   value: encryptionConfig
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /run/mount
              name: host-mount
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            # - name: ceph-csi-encryption-kms-config
            #   mountPath: /etc/ceph-csi-encryption-kms-config/
            - name: plugin-dir
              mountPath: /var/lib/kubelet/plugins
              mountPropagation: "Bidirectional"
            - name: mountpoint-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
        - name: liveness-prometheus
          securityContext:
            privileged: true
          image: quay.io/cephcsi/cephcsi:v3.4.0
          imagePullPolicy: "IfNotPresent"
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8680"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
        - name: mountpoint-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: host-mount
          hostPath:
            path: /run/mount
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        # - name: ceph-csi-encryption-kms-config
        #   configMap:
        #     name: ceph-csi-encryption-kms-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }
---
# This is a service to expose the liveness metrics
apiVersion: v1
kind: Service
metadata:
  name: csi-metrics-rbdplugin
  labels:
    app: csi-metrics
spec:
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8680
  selector:
    app: csi-rbdplugin
EOF
# again, quote the delimiter so $(...) survives into the file
cat > /root/6-csi-rbdplugin-provisioner.yaml << 'EOF'
---
kind: Service
apiVersion: v1
metadata:
  name: csi-rbdplugin-provisioner
  labels:
    app: csi-metrics
spec:
  selector:
    app: csi-rbdplugin-provisioner
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8680
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-rbdplugin-provisioner
spec:
  replicas: 3
  selector:
    matchLabels:
      app: csi-rbdplugin-provisioner
  template:
    metadata:
      labels:
        app: csi-rbdplugin-provisioner
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-rbdplugin-provisioner
              topologyKey: "kubernetes.io/hostname"
      serviceAccountName: rbd-csi-provisioner
      priorityClassName: system-cluster-critical
      containers:
        - name: csi-provisioner
          image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
          imagePullPolicy: "IfNotPresent"
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--retry-interval-start=500ms"
            - "--leader-election=true"
            # set it to true to use topology based provisioning
            - "--feature-gates=Topology=false"
            # if fstype is not specified in storageclass, ext4 is default
            - "--default-fstype=ext4"
            - "--extra-create-metadata=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-snapshotter
          image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          securityContext:
            privileged: true
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-attacher
          image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election=true"
            - "--retry-interval-start=500ms"
          env:
            - name: ADDRESS
              value: /csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--leader-election"
            - "--retry-interval-start=500ms"
            - "--handle-volume-inuse-error=false"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-rbdplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
          # for stable functionality replace canary with latest release version
          image: quay.io/cephcsi/cephcsi:v3.4.0
          args:
            - "--nodeid=$(NODE_ID)"
            - "--type=rbd"
            - "--controllerserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--pidlimit=-1"
            - "--rbdhardmaxclonedepth=8"
            - "--rbdsoftmaxclonedepth=4"
            - "--enableprofiling=false"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # - name: POD_NAMESPACE
            #   valueFrom:
            #     fieldRef:
            #       fieldPath: spec.namespace
            # - name: KMS_CONFIGMAP_NAME
            #   value: encryptionConfig
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            # - name: ceph-csi-encryption-kms-config
            #   mountPath: /etc/ceph-csi-encryption-kms-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
        - name: csi-rbdplugin-controller
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
          # for stable functionality replace canary with latest release version
          image: quay.io/cephcsi/cephcsi:v3.4.0
          args:
            - "--type=controller"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--drivernamespace=$(DRIVER_NAMESPACE)"
          env:
            - name: DRIVER_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
        - name: liveness-prometheus
          image: quay.io/cephcsi/cephcsi:v3.4.0
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8680"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: socket-dir
          emptyDir: {
            medium: "Memory"
          }
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        # - name: ceph-csi-encryption-kms-config
        #   configMap:
        #     name: ceph-csi-encryption-kms-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }
EOF
# Deploy the CSI components
kubectl apply -f /root/3-csi-provisioner-rbac.yaml
kubectl apply -f /root/4-csi-nodeplugin-rbac.yaml
kubectl apply -f /root/5-csi-rbdplugin.yaml
kubectl apply -f /root/6-csi-rbdplugin-provisioner.yaml
kubectl get pod -o wide
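# Wait for the rollouts to finish: the DaemonSet should run one pod per
# schedulable node and the Deployment three replicas.
kubectl rollout status daemonset/csi-rbdplugin
kubectl rollout status deployment/csi-rbdplugin-provisioner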
cat > /root/7-csi-rbd-sc.yaml << EOF
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: 83baa63b-c421-480a-be24-0e2c59a70e17
  pool: kubernetes
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
  csi.storage.k8s.io/fstype: xfs
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
EOF
kubectl apply -f /root/7-csi-rbd-sc.yaml
kubectl get sc -o wide
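# describe shows the parameters and secret references the class passes to the driver
kubectl describe sc csi-rbd-sc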
# For persistent volumes, Kubernetes supports two volumeModes: Filesystem and Block.
# volumeMode is an optional API parameter and defaults to Filesystem when omitted.
# A volume with volumeMode: Filesystem is mounted into a directory in the Pod.
# If the volume is backed by a block device that is currently empty, Kubernetes
# creates a filesystem on the device before mounting it for the first time.
cat > /root/8-test-rbd-pvc.yaml << EOF
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: raw-block-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Block
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
EOF
kubectl apply -f /root/8-test-rbd-pvc.yaml
kubectl get pvc -o wide
#NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
#raw-block-pvc Bound pvc-beb3a8c6-b659-44c6-bc5c-5fbd3ead1706 1Gi RWO csi-rbd-sc 10s Block
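# On the Ceph side each bound PVC maps to an rbd image in the pool;
# the csi-vol-... names are generated by the driver.
rbd ls -p kubernetes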
cat > /root/9-test-rbd-pod.yaml << EOF
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-raw-block-volume
  namespace: default
spec:
  containers:
    - name: nginx-pod
      image: nginx
      imagePullPolicy: "IfNotPresent"
      command: ["/bin/sh", "-c"]
      args: ["tail -f /dev/null"]
      volumeDevices:
        - name: data
          devicePath: /dev/xvda
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: raw-block-pvc
EOF
kubectl apply -f /root/9-test-rbd-pod.yaml
kubectl get pod -o wide
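# Inside the pod the claim shows up as a raw block device, not a mounted
# filesystem; /dev/xvda matches the devicePath set in the manifest above.
kubectl exec pod-with-raw-block-volume -- ls -l /dev/xvda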
# Next, test volumeMode: Filesystem (the default), described above.
cat > /tmp/pvc.yaml << EOF
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
EOF
kubectl apply -f /tmp/pvc.yaml
kubectl get pvc -o wide
#NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
#raw-block-pvc Bound pvc-beb3a8c6-b659-44c6-bc5c-5fbd3ead1706 1Gi RWO csi-rbd-sc 81m Block
#rbd-pvc Bound pvc-3a57e9e7-31c1-4fe6-958e-df4a985654c5 1Gi RWO csi-rbd-sc 29m Filesystem
cat > /tmp/pod.yaml << EOF
---
apiVersion: v1
kind: Pod
metadata:
  name: csi-rbd-demo-pod
spec:
  containers:
    - name: web
      image: nginx:1.21
      imagePullPolicy: "IfNotPresent"
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www/html
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: rbd-pvc
        readOnly: false
EOF
kubectl apply -f /tmp/pod.yaml
# kubectl exec -it csi-rbd-demo-pod -- df -h
#Filesystem Size Used Avail Use% Mounted on
#overlay 20G 4.8G 16G 24% /
#tmpfs 64M 0 64M 0% /dev
#tmpfs 1.9G 0 1.9G 0% /sys/fs/cgroup
#/dev/sda1 20G 4.8G 16G 24% /etc/hosts
#shm 64M 0 64M 0% /dev/shm
#/dev/rbd0 976M 2.6M 958M 1% /var/lib/www/html <-- the RBD volume is mounted here
#tmpfs 1.9G 12K 1.9G 1% /run/secrets/kubernetes.io/serviceaccount
#tmpfs 1.9G 0 1.9G 0% /proc/acpi
#tmpfs 1.9G 0 1.9G 0% /proc/scsi
#tmpfs 1.9G 0 1.9G 0% /sys/firmware
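# A quick write/read round trip on the mounted volume (the file name is arbitrary):
kubectl exec csi-rbd-demo-pod -- sh -c 'echo hello > /var/lib/www/html/test.txt && cat /var/lib/www/html/test.txt'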