
Installing and Configuring Ceph Octopus on CentOS 8

ceph octopus

1 Environment and configuration

1.1 Introduction to Ceph

#Ceph architecture
#The three interfaces Ceph provides:
1 Object: a native API, plus compatibility with the Swift and S3 APIs.
2 Block: supports thin provisioning, snapshots, and cloning.
3 File: a POSIX interface with snapshot support.
(Figure: Ceph architecture diagram)

#Pros and cons of the three interface types: (figure)

1.2 Environment
[root@ceph135 ~]# cat /etc/redhat-release
CentOS Linux release 8.1.1911 (Core)

#ceph 
Octopus 15.2.3
#Operating systems supported by this release
CentOS 8
CentOS 7 (partial – see below) #partial support only
Ubuntu 18.04 (Bionic)
Debian Buster
Container image (based on CentOS 8)
Note: the dashboard, prometheus, and restful mgr modules cannot be used on CentOS 7, because CentOS 7 lacks the required Python 3 module dependencies.

#Network layout
172.16.1.0/24 #Management Network (optional, can be left out)
172.16.2.0/24 #Public Network
172.16.3.0/24 #Cluster Network

#Besides the system disk, each Ceph node has two 30 GB data disks attached
ceph135 eth0:172.16.1.135 eth1:172.16.2.135 eth2:172.16.3.135 1c1g
ceph136 eth0:172.16.1.136 eth1:172.16.2.136 eth2:172.16.3.136 1c1g
ceph137 eth0:172.16.1.137 eth1:172.16.2.137 eth2:172.16.3.137 1c1g
1.3 Preparing the base environment
1.3.1 Disable SELinux and the firewall
#Stop and disable the firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
firewall-cmd --state
#Disable SELinux (only SELINUX itself needs changing; leave SELINUXTYPE at its default)
sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
grep --color=auto '^SELINUX' /etc/selinux/config
setenforce 0
reboot
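
#A quick sanity check after the reboot (a minimal sketch; getenforce and firewall-cmd ship with CentOS 8 by default):
getenforce                               #should print Disabled
firewall-cmd --state                     #should print "not running"
systemctl is-enabled firewalld.service   #should print disabled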
1.3.2 Set the hostname (run on every node with its own name)
hostnamectl set-hostname ceph135
su -
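
#(Optional) If you can already SSH to the other nodes (password login is fine at this stage), the hostnames can be set in one pass from a single shell. This is only a convenience sketch; the IPs follow the management-network plan in section 1.2:
for n in 135 136 137; do ssh root@172.16.1.$n "hostnamectl set-hostname ceph$n"; done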
1.3.3 Configure the IP address of each NIC (substitute your own NIC names and IPs)

#(Instead of editing /etc/sysconfig/network-scripts/ifcfg-eth0 by hand, recreate the connection with nmcli)

NetName=eth0
rm -f /etc/sysconfig/network-scripts/ifcfg-$NetName
nmcli con add con-name $NetName ifname $NetName autoconnect yes type ethernet \
ip4 172.16.1.135/24 ipv4.dns "114.114.114.114" ipv4.gateway "172.16.1.254"
#Reload the network configuration once done
nmcli c reload

#(Optional) To pin the default route to a particular NIC, add the following to that NIC's config file, e.g.:
#vim /etc/sysconfig/network-scripts/ifcfg-eth0

IPV4_ROUTE_METRIC=0
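
#The public and cluster interfaces are set up the same way. A sketch for ceph135, assuming the NIC names eth1/eth2 and the subnets from section 1.2 (change the last octet on each node):
nmcli con add con-name eth1 ifname eth1 autoconnect yes type ethernet ip4 172.16.2.135/24
nmcli con add con-name eth2 ifname eth2 autoconnect yes type ethernet ip4 172.16.3.135/24
nmcli c reload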
1.3.4 Add the Ceph node entries to /etc/hosts

#vim /etc/hosts

#[ceph]
172.16.2.135 ceph135
172.16.2.136 ceph136
172.16.2.137 ceph137
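
#To append the same entries without opening an editor (run on every node), a here-document works:
cat >> /etc/hosts <<'EOF'
#[ceph]
172.16.2.135 ceph135
172.16.2.136 ceph136
172.16.2.137 ceph137
EOF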
1.3.5 Add the Octopus yum repository

#vim /etc/yum.repos.d/ceph.repo

[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/$basearch
enabled=1
gpgcheck=0
type=rpm-md

[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/noarch
enabled=1
gpgcheck=0
type=rpm-md

[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/SRPMS
enabled=1
gpgcheck=0
type=rpm-md

#Switch the system repos to the Aliyun mirror and rebuild the yum cache

wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-8.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo

yum clean all && yum makecache
1.3.6 Time synchronization

#I prefer the following approach for time synchronization

rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm

dnf install wntp

ntpdate ntp3.aliyun.com 

echo "*/3 * * * * ntpdate ntp3.aliyun.com  &> /dev/null" > /tmp/crontab

crontab /tmp/crontab
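
#Alternatively, chrony also works; cephadm's bootstrap host check verifies time synchronization (in the log further below it finds a running chronyd unit). A minimal sketch, where the Aliyun NTP server is only an example:
dnf install -y chrony
echo "server ntp.aliyun.com iburst" >> /etc/chrony.conf
systemctl enable --now chronyd
chronyc sources -v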
1.3.7 (Optional) Install basic utilities
yum install net-tools wget vim bash-completion lrzsz unzip zip -y

2 Installing and configuring Ceph

2.1 Deploying the cephadm tool

#Version 15 supports deployment with the cephadm tool; ceph-deploy is supported only up to version 14
#Fetch the latest cephadm and make it executable

curl --silent --remote-name --location https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
chmod +x cephadm

#Use cephadm to fetch and install the latest Octopus release

[root@ceph135 ~]# dnf install python3 podman #install on every node
[root@ceph135 ~]# ./cephadm add-repo --release octopus #optional, since we already added a domestic Ceph mirror above

INFO:root:Writing repo to /etc/yum.repos.d/ceph.repo...
INFO:cephadm:Enabling EPEL...
[root@ceph135 ~]# ./cephadm install
INFO:cephadm:Installing packages ['cephadm']...
[root@ceph135 ~]# which cephadm
/usr/sbin/cephadm
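
#A quick check that the installed cephadm is usable (a sketch; the exact output format may vary between builds):
cephadm version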
2.2 Creating a new Ceph cluster
2.2.1 Designate the admin node

#Bootstrap the cluster on a network reachable by every host that will access Ceph: specify the mon IP, and the generated config files are written to /etc/ceph

[root@ceph135 ~]# mkdir -p /etc/ceph
[root@ceph135 ~]# cephadm bootstrap --mon-ip 172.16.2.135
INFO:cephadm:Verifying podman|docker is present...
INFO:cephadm:Verifying lvm2 is present...
INFO:cephadm:Verifying time synchronization is in place...
INFO:cephadm:Unit chronyd.service is enabled and running
INFO:cephadm:Repeating the final host check...
INFO:cephadm:podman|docker (/usr/bin/podman) is present
INFO:cephadm:systemctl is present
INFO:cephadm:lvcreate is present
INFO:cephadm:Unit chronyd.service is enabled and running
INFO:cephadm:Host looks OK
INFO:root:Cluster fsid: b3add0aa-aee7-11ea-a3e4-5e7ce92c6bef
INFO:cephadm:Verifying IP 172.16.2.135 port 3300 ...
INFO:cephadm:Verifying IP 172.16.2.135 port 6789 ...
INFO:cephadm:Mon IP 172.16.2.135 is in CIDR network 172.16.2.0/24
INFO:cephadm:Pulling latest docker.io/ceph/ceph:v15 container...
INFO:cephadm:Extracting ceph user uid/gid from container image...
INFO:cephadm:Creating initial keys...
INFO:cephadm:Creating initial monmap...
INFO:cephadm:Creating mon...
INFO:cephadm:Waiting for mon to start...
INFO:cephadm:Waiting for mon...
INFO:cephadm:Assimilating anything we can from ceph.conf...
INFO:cephadm:Generating new minimal ceph.conf...
INFO:cephadm:Restarting the monitor...
INFO:cephadm:Setting mon public_network...
INFO:cephadm:Creating mgr...
INFO:cephadm:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
INFO:cephadm:Wrote config to /etc/ceph/ceph.conf
INFO:cephadm:Waiting for mgr to start...
INFO:cephadm:Waiting for mgr...
INFO:cephadm:mgr not available, waiting (1/10)...
INFO:cephadm:mgr not available, waiting (2/10)...
INFO:cephadm:Enabling cephadm module...
INFO:cephadm:Waiting for the mgr to restart...
INFO:cephadm:Waiting for Mgr epoch 5...
INFO:cephadm:Setting orchestrator backend to cephadm...
INFO:cephadm:Generating ssh key...
INFO:cephadm:Wrote public SSH key to to /etc/ceph/ceph.pub
INFO:cephadm:Adding key to root@localhost's authorized_keys...
INFO:cephadm:Adding host ceph135...
INFO:cephadm:Deploying mon service with default placement...
INFO:cephadm:Deploying mgr service with default placement...
INFO:cephadm:Deploying crash service with default placement...
INFO:cephadm:Enabling mgr prometheus module...
INFO:cephadm:Deploying prometheus service with default placement...
INFO:cephadm:Deploying grafana service with default placement...
INFO:cephadm:Deploying node-exporter service with default placement...
INFO:cephadm:Deploying alertmanager service with default placement...
INFO:cephadm:Enabling the dashboard module...
INFO:cephadm:Waiting for the mgr to restart...
INFO:cephadm:Waiting for Mgr epoch 12...
INFO:cephadm:Generating a dashboard self-signed certificate...
INFO:cephadm:Creating initial admin user...
INFO:cephadm:Fetching dashboard port number...
INFO:cephadm:Ceph Dashboard is now available at:

	     URL: https://ceph135:8443/
	    User: admin
	Password: vcxbz7cubp

INFO:cephadm:You can access the Ceph CLI with:

	sudo /usr/sbin/cephadm shell --fsid b3add0aa-aee7-11ea-a3e4-5e7ce92c6bef -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

INFO:cephadm:Please consider enabling telemetry to help improve Ceph:

	ceph telemetry on

For more information see:

	https://docs.ceph.com/docs/master/mgr/telemetry/

INFO:cephadm:Bootstrap complete.

#At this point you can log in to the dashboard at https://ceph135:8443/ to verify; the password must be changed on the first login
(Dashboard screenshots)

2.2.2 Make the ceph command available on the host

#Cephadm does not require any Ceph packages on the host itself, but it is convenient to enable easy access to the ceph command.
#The cephadm shell command starts a bash shell inside a container that has all Ceph packages installed. By default, if config and keyring files are found in /etc/ceph on the host, they are passed into the container environment, so the shell is fully functional.

[root@ceph135 ~]# cephadm shell
INFO:cephadm:Inferring fsid 9849edac-a547-11ea-a767-12702e1b568d
INFO:cephadm:Using recent ceph image docker.io/ceph/ceph:v15
[ceph: root@ceph135 /]# alias ceph='cephadm shell -- ceph'
[ceph: root@ceph135 /]# exit
exit

[root@ceph135 ~]# cephadm install ceph-common
INFO:cephadm:Installing packages ['ceph-common']...

[root@ceph135 ~]# ceph -v
ceph version 15.2.3 (d289bbdec69ed7c1f516e0a093594580a76b78d0) octopus (stable)

[root@ceph135 ~]# ceph status
  cluster:
    id:     b3add0aa-aee7-11ea-a3e4-5e7ce92c6bef
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum ceph135 (age 19m)
    mgr: ceph135.omlfxo(active, since 15m)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     100.000% pgs unknown
             1 unknown

[root@ceph135 ~]# ceph health
HEALTH_WARN Reduced data availability: 1 pg inactive; OSD count 0 < osd_pool_default_size 3
2.2.3 Add new servers to the Ceph cluster
[root@ceph135 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph136
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/ceph/ceph.pub"
The authenticity of host 'ceph136 (172.16.2.136)' can't be established.
ECDSA key fingerprint is SHA256:UiF5sLefJuaY6uueUxyu0t0Xdeha8BPZXGvQHZrco1M.
ECDSA key fingerprint is MD5:87:59:6e:b5:42:6d:c4:02:d8:ef:29:56:4e:0d:1d:09.
Are you sure you want to continue connecting (yes/no)? yes
root@ceph136's password:

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@ceph136'"
and check to make sure that only the key(s) you wanted were added.

[root@ceph135 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph137
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/ceph/ceph.pub"
The authenticity of host 'ceph137 (172.16.2.137)' can't be established.
ECDSA key fingerprint is SHA256:UiF5sLefJuaY6uueUxyu0t0Xdeha8BPZXGvQHZrco1M.
ECDSA key fingerprint is MD5:87:59:6e:b5:42:6d:c4:02:d8:ef:29:56:4e:0d:1d:09.
Are you sure you want to continue connecting (yes/no)? yes
root@ceph137's password:

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@ceph137'"
and check to make sure that only the key(s) you wanted were added.

[root@ceph135 ~]# ceph orch host add ceph136
Added host 'ceph136'
[root@ceph135 ~]# ceph orch host add ceph137
Added host 'ceph137'
2.2.4 Deploy additional monitors

#Set the public_network subnet that clients use to access the cluster

ceph config set mon public_network 172.16.2.0/24

#Pick the nodes that should run a mon; here I select all of them

[root@ceph135 ~]# ceph orch host label add ceph135 mon
Added label mon to host ceph135
[root@ceph135 ~]# ceph orch host label add ceph136 mon
Added label mon to host ceph136
[root@ceph135 ~]# ceph orch host label add ceph137 mon
Added label mon to host ceph137
[root@ceph135 ~]# ceph orch host ls
HOST     ADDR     LABELS  STATUS
ceph135  ceph135  mon
ceph136  ceph136  mon
ceph137  ceph137  mon

#Tell cephadm to deploy mons according to the label; this step takes a while because each node has to pull the images and start the containers

[root@ceph135 ~]# ceph orch apply mon label:mon
Scheduled mon update...
#To verify the deployment, check the other two nodes, for example:
[root@ceph136 ~]# podman ps -a
CONTAINER ID  IMAGE                                COMMAND               CREATED         STATUS             PORTS  NAMES
a24ab51b5f62  docker.io/prom/node-exporter:latest  --no-collector.ti...  5 minutes ago   Up 5 minutes ago          ceph-b3add0aa-ae
37ef832554fd  docker.io/ceph/ceph:v15              -n mon.ceph136 -f...  6 minutes ago   Up 6 minutes ago          ceph-b3add0aa-ae
10122c06ad1a  docker.io/ceph/ceph:v15              -n mgr.ceph136.iy...  7 minutes ago   Up 7 minutes ago          ceph-b3add0aa-ae
df5275a6684f  docker.io/ceph/ceph:v15              -n client.crash.c...  12 minutes ago  Up 12 minutes ago         ceph-b3add0aa-ae
[root@ceph136 ~]# podman images
REPOSITORY                     TAG      IMAGE ID       CREATED       SIZE
docker.io/ceph/ceph            v15      d72755c420bc   2 weeks ago   1.13 GB
docker.io/prom/node-exporter   latest   14191dbfb45b   2 weeks ago   27.7 MB
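
#The same can be checked from the admin node without logging in to each host; both commands are standard Octopus orchestrator calls:
[root@ceph135 ~]# ceph orch ls        #service-level view (mon, mgr, crash, ...)
[root@ceph135 ~]# ceph orch ps        #every daemon cephadm manages, per host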
2.2.5 Deploy OSDs

#List the disks available for use

[root@ceph135 ~]# ceph orch device ls
HOST     PATH      TYPE   SIZE  DEVICE                     AVAIL  REJECT REASONS
ceph135  /dev/sdb  hdd   32.0G  QEMU_HARDDISK_drive-scsi1  True
ceph135  /dev/sdc  hdd   32.0G  QEMU_HARDDISK_drive-scsi2  True
ceph135  /dev/sda  hdd   20.0G  QEMU_HARDDISK_drive-scsi0  False  locked
ceph136  /dev/sdb  hdd   32.0G  QEMU_HARDDISK_drive-scsi1  True
ceph136  /dev/sdc  hdd   32.0G  QEMU_HARDDISK_drive-scsi2  True
ceph136  /dev/sda  hdd   20.0G  QEMU_HARDDISK_drive-scsi0  False  locked
ceph137  /dev/sdb  hdd   32.0G  QEMU_HARDDISK_drive-scsi2  True
ceph137  /dev/sdc  hdd   32.0G  QEMU_HARDDISK_drive-scsi1  True
ceph137  /dev/sda  hdd   20.0G  QEMU_HARDDISK_drive-scsi0  False  locked

#For convenience, I simply use all available disks here

[root@ceph135 ~]# ceph orch apply osd --all-available-devices
NAME                  HOST    DATA     DB WAL
all-available-devices ceph135 /dev/sdb -  -
all-available-devices ceph135 /dev/sdc -  -
all-available-devices ceph136 /dev/sdb -  -
all-available-devices ceph136 /dev/sdc -  -
all-available-devices ceph137 /dev/sdb -  -
all-available-devices ceph137 /dev/sdc -  -
#To add a single disk instead:
ceph orch daemon add osd ceph135:/dev/sdb
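
#If a disk shows AVAIL = False because it still holds old partitions or LVM data, it can be wiped first. A sketch using the orchestrator's zap command (this destroys all data on the device):
ceph orch device zap ceph135 /dev/sdb --force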

#Verify the deployment

[root@ceph135 ~]# ceph osd df
ID  CLASS  WEIGHT   REWEIGHT  SIZE     RAW USE  DATA     OMAP  META   AVAIL    %USE  VAR   PGS  STATUS
 0    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    1      up
 1    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    0      up
 2    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    0      up
 3    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    1      up
 4    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    0      up
 5    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    1      up
                       TOTAL  192 GiB  6.0 GiB   32 MiB   0 B  6 GiB  186 GiB  3.14
MIN/MAX VAR: 1.00/1.00  STDDEV: 0

3 Storage deployment

3.1 Deploying CephFS

#Deploy the MDS service for CephFS, giving it a name and the number of MDS daemons

[root@ceph135 ~]# ceph orch apply mds fs-cluster --placement=3
Scheduled mds.fs-cluster update...
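
#To see where the MDS daemons were placed before they show up in ceph -s, filter the daemon list (a simple optional check):
[root@ceph135 ~]# ceph orch ps | grep mds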

#Verify:

[root@ceph135 ~]# ceph -s
  cluster:
    id:     b3add0aa-aee7-11ea-a3e4-5e7ce92c6bef
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph135,ceph136,ceph137 (age 47s)
    mgr: ceph135.omlfxo(active, since 89m), standbys: ceph136.iyehke, ceph137.fywkvw
    mds:  3 up:standby
    osd: 6 osds: 6 up (since 20m), 6 in (since 20m)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   6.0 GiB used, 186 GiB / 192 GiB avail
    pgs:     1 active+clean
3.2 Deploying RGW

#Create a realm:

[root@ceph135 ~]# radosgw-admin realm create --rgw-realm=rgw-org --default
{
    "id": "31424ff4-38a1-48d9-bab4-fcfe8d75efcc",
    "name": "rgw-org",
    "current_period": "06f0511d-58cd-4acd-aac1-da25ea785454",
    "epoch": 1
}

#Create a zonegroup

[root@ceph135 ~]# radosgw-admin zonegroup create --rgw-zonegroup=rgwgroup --master --default
{
    "id": "35dcfee7-fa47-4e53-b41d-9718fd029782",
    "name": "rgwgroup",
    "api_name": "rgwgroup",
    "is_master": "true",
    "endpoints": [],
    "hostnames": [],
    "hostnames_s3website": [],
    "master_zone": "",
    "zones": [],
    "placement_targets": [],
    "default_placement": "",
    "realm_id": "31424ff4-38a1-48d9-bab4-fcfe8d75efcc",
    "sync_policy": {
        "groups": []
    }
}


#Create a zone

[root@ceph135 ~]# radosgw-admin zone create --rgw-zonegroup=rgwgroup --rgw-zone=zone-dc1 --master --default
{
    "id": "ec441ad3-1167-459d-9d1c-cf21e5625cbf",
    "name": "zone-dc1",
    "domain_root": "zone-dc1.rgw.meta:root",
    "control_pool": "zone-dc1.rgw.control",
    "gc_pool": "zone-dc1.rgw.log:gc",
    "lc_pool": "zone-dc1.rgw.log:lc",
    "log_pool": "zone-dc1.rgw.log",
    "intent_log_pool": "zone-dc1.rgw.log:intent",
    "usage_log_pool": "zone-dc1.rgw.log:usage",
    "roles_pool": "zone-dc1.rgw.meta:roles",
    "reshard_pool": "zone-dc1.rgw.log:reshard",
    "user_keys_pool": "zone-dc1.rgw.meta:users.keys",
    "user_email_pool": "zone-dc1.rgw.meta:users.email",
    "user_swift_pool": "zone-dc1.rgw.meta:users.swift",
    "user_uid_pool": "zone-dc1.rgw.meta:users.uid",
    "otp_pool": "zone-dc1.rgw.otp",
    "system_key": {
        "access_key": "",
        "secret_key": ""
    },
    "placement_pools": [
        {
            "key": "default-placement",
            "val": {
                "index_pool": "zone-dc1.rgw.buckets.index",
                "storage_classes": {
                    "STANDARD": {
                        "data_pool": "zone-dc1.rgw.buckets.data"
                    }
                },
                "data_extra_pool": "zone-dc1.rgw.buckets.non-ec",
                "index_type": 0
            }
        }
    ],
    "realm_id": "31424ff4-38a1-48d9-bab4-fcfe8d75efcc"
}


#Deploy a group of radosgw daemons for this realm and zone; here only two nodes run RGW

[root@ceph135 ~]# ceph orch apply rgw rgw-org zone-dc1 --placement="2 ceph136 ceph137"
Scheduled rgw.rgw-org.zone-dc1 update...
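
#Once the daemons are running, each gateway should answer plain HTTP; a minimal check, assuming the default RGW port 80 used here (an anonymous request returns an empty S3 bucket-listing XML):
curl http://ceph136:80
curl http://ceph137:80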

#Verify

[root@ceph135 ~]# ceph -s
  cluster:
    id:     b3add0aa-aee7-11ea-a3e4-5e7ce92c6bef
    health: HEALTH_WARN
            1 daemons have recently crashed

  services:
    mon: 3 daemons, quorum ceph135,ceph136,ceph137 (age 9m)
    mgr: ceph135.omlfxo(active, since 108m), standbys: ceph136.iyehke, ceph137.fywkvw
    mds:  3 up:standby
    osd: 6 osds: 6 up (since 39m), 6 in (since 39m)
    rgw: 2 daemons active (rgw-org.zone-dc1.ceph136.ddujbi, rgw-org.zone-dc1.ceph137.mnfhhp)

  task status:

  data:
    pools:   5 pools, 129 pgs
    objects: 105 objects, 5.4 KiB
    usage:   6.1 GiB used, 186 GiB / 192 GiB avail
    pgs:     1.550% pgs not active
             127 active+clean
             2   peering

  io:
    client:   7.9 KiB/s rd, 0 B/s wr, 8 op/s rd, 4 op/s wr

  progress:
    PG autoscaler decreasing pool 5 PGs from 32 to 8 (0s)
      [............................]

#Enable the dashboard for RGW
#Create an RGW admin user

[root@ceph135 ~]# radosgw-admin user create --uid=admin --display-name=admin --system
{
    "user_id": "admin",
    "display_name": "admin",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "subusers": [],
    "keys": [
        {
            "user": "admin",
            "access_key": "XY518C4I2RO51D4S2JGT",
            "secret_key": "e9akFxQwOM8Y9zxDum4CLCQEOXaImVomGiqIsutC"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "system": "true",
    "default_placement": "",
    "default_storage_class": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}


#Set the dashboard RGW credentials

[root@ceph135 ~]# ceph dashboard set-rgw-api-access-key XY518C4I2RO51D4S2JGT
Option RGW_API_ACCESS_KEY updated
[root@ceph135 ~]# ceph dashboard set-rgw-api-secret-key e9akFxQwOM8Y9zxDum4CLCQEOXaImVomGiqIsutC
Option RGW_API_SECRET_KEY updated

#Disable certificate verification, use plain HTTP, point the dashboard at an RGW host and port, and use the admin account

[root@ceph135 ~]# ceph dashboard set-rgw-api-ssl-verify False
Option RGW_API_SSL_VERIFY updated
[root@ceph135 ~]# ceph dashboard set-rgw-api-scheme http
Option RGW_API_SCHEME updated
[root@ceph135 ~]# ceph dashboard set-rgw-api-host 172.16.2.137
Option RGW_API_HOST updated
[root@ceph135 ~]# ceph dashboard set-rgw-api-port 80
Option RGW_API_PORT updated
[root@ceph135 ~]# ceph dashboard set-rgw-api-user-id admin
Option RGW_API_USER_ID updated

#Restart RGW

[root@ceph135 ~]# ceph orch restart rgw
restart rgw.rgw-org.zone-dc1.ceph136.ddujbi from host 'ceph136'
restart rgw.rgw-org.zone-dc1.ceph137.mnfhhp from host 'ceph137'

(Dashboard screenshot)

X. Problems encountered during deployment

eg1.
[root@ceph135 ~]# cephadm shell
ERROR: Cannot infer an fsid, one must be specified: ['00482894-a564-11ea-8617-12702e1b568d', '9849edac-a547-11ea-a767-12702e1b568d']
Fix: delete the old cluster's data and keep only the new cluster's directory
[root@ceph135 ceph]# cd /var/lib/ceph
[root@ceph135 ceph]# ls
00482894-a564-11ea-8617-12702e1b568d  9849edac-a547-11ea-a767-12702e1b568d
[root@ceph135 ceph]# rm -rf 9849edac-a547-11ea-a767-12702e1b568d/
[root@ceph135 ceph]# ll

eg2.[root@ceph135 ~]# ./cephadm add-repo --release octopus
-bash: ./cephadm: /usr/bin/python3: bad interpreter: No such file or directory
Fix: dnf install python3

eg3.[root@ceph135 ~]# ./cephadm install
Unable to locate any of ['podman', 'docker']
Fix: dnf install -y podman

eg4.ERROR: lvcreate binary does not appear to be installed
Fix: yum install lvm2
