
OpenStack Victoria Manual Deployment (Basic Edition)

Controller IP: 172.16.20.2

Environment Preparation

Network interface configuration (all nodes)
vi /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
NAME=ens33
DEVICE=ens33
ONBOOT=yes
IPADDR=172.16.20.2
NETMASK=255.255.254.0
GATEWAY=172.16.20.1
DNS1=8.8.8.8
DNS2=114.114.114.114
#I am using VMware Workstation: add a second NIC, then check it with `ip addr` (or restart NetworkManager: systemctl restart NetworkManager)
#If the NIC was added afterwards, make sure the interface name in the config file matches what `ip addr` reports
vi /etc/sysconfig/network-scripts/ifcfg-ens37
NAME=ens37
DEVICE=ens37
TYPE=Ethernet
ONBOOT="yes"
BOOTPROTO="none"
IPADDR=192.168.109.124
NETMASK=255.255.255.0

#How to restart the network on CentOS 8
systemctl restart NetworkManager
nmcli c up ens33
nmcli c up ens37
Disable the firewall (all nodes)
systemctl stop firewalld && systemctl disable firewalld
Disable SELinux (all nodes)
#Disabling SELinux here mainly avoids permission problems with ssh and other operations; the openstack-selinux package installed later supplies the proper policies.

sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
grep --color=auto '^SELINUX' /etc/selinux/config
setenforce 0
Set hostnames and configure host mappings (all nodes)
#Controller node
hostnamectl set-hostname controller
bash

#Compute node:
hostnamectl set-hostname compute
bash

#Edit /etc/hosts
echo "172.16.20.2 controller" >> /etc/hosts
echo "172.16.20.3 compute" >> /etc/hosts
Switch the yum repositories (all nodes)
CentOS 8 (the official CentOS 8 repositories have been taken offline; switching to the centos-vault mirrors is recommended)
#wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo

#or: curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo


#From 2022 onward the official CentOS repo files need the following changes (they can be applied on top of the existing repo files)
sudo sed -i -e "s|mirrorlist=|#mirrorlist=|g" /etc/yum.repos.d/CentOS-*
sudo sed -i -e "s|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g" /etc/yum.repos.d/CentOS-*


yum -y install wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip libaio-devel vim ncurses-devel autoconf automake zlib-devel git
Time synchronization (all nodes)
#Controller node
yum install -y chrony
sed -i '3,6s/^/#/g' /etc/chrony.conf
echo "local stratum 10" >>/etc/chrony.conf
echo "server controller iburst" >>/etc/chrony.conf
echo "allow 172.16.20.0/24" >>/etc/chrony.conf


#Compute node
yum install -y chrony
sed -i '3,6s/^/#/g' /etc/chrony.conf
echo "local stratum 10" >>/etc/chrony.conf
echo "server controller iburst" >>/etc/chrony.conf


#All nodes: run after every node has been configured
systemctl enable chronyd.service
systemctl restart chronyd.service
chronyc sources

##Normally every node shows ^* rather than ^? (if not, check the host mappings and the configuration, then restart chronyd on all nodes)
[root@compute ~]# chronyc sources
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* controller                   11   6    17    42    +18us[  +27us] +/- 6496us
[root@compute ~]#
Install the database (controller)
yum install mariadb mariadb-server python3-mod_wsgi python3-PyMySQL -y
vi /etc/my.cnf.d/openstack.cnf

[mysqld]
bind-address = 172.16.20.2
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

systemctl enable mariadb.service
systemctl start mariadb.service
#Answer the prompts as appropriate
mysql_secure_installation
Message queue (controller) (after a reboot, check that rabbitmq-server is running and that the openstack user still has its permissions; re-run the permission command below if necessary)
yum install rabbitmq-server -y
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service

rabbitmqctl add_user openstack 123456
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
#Enable the RabbitMQ management UI (optional); it listens on port 15672 (default account/password: guest/guest)
rabbitmq-plugins enable rabbitmq_management

Memory cache (controller)
yum install memcached python3-memcached -y

vi /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 172.16.20.2,::1"
#OPTIONS="-l 0.0.0.0,::1"  ##listens on all interfaces; convenient but less secure

systemctl enable memcached.service
systemctl start memcached.service
etcd service (controller)
yum install etcd -y

sed -i -e '5s/#//g' \
-e '20s/#//g' \
-e '6s/#//g' \
-e '26s/#//g' \
-e '27s/#//g' \
-e '28s/#//g' \
-e '5,26s/localhost/'172.16.20.2'/g' \
-e '9s/default/'controller'/g' \
-e '26s/default/'controller'/g' \
-e '27s/etcd-cluster/etcd-cluster-01/g' /etc/etcd/etcd.conf

##Check the configuration by filtering the effective (uncommented) values from the first 28 lines

[root@controller ~]# head -n 28 /etc/etcd/etcd.conf | egrep -v '^$|^#'
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://172.16.20.2:2380"
ETCD_LISTEN_CLIENT_URLS="http://172.16.20.2:2379"
ETCD_NAME="controller"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://172.16.20.2:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://172.16.20.2:2379"
ETCD_INITIAL_CLUSTER="controller=http://172.16.20.2:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
[root@controller ~]# 

systemctl start etcd
systemctl enable etcd
Install the release repository (all nodes)

###Run the following on all nodes (run it twice if needed and check that everything downloaded)

yum install python3 python3-devel python3-pip -y

###Enable the repository

yum install centos-release-openstack-victoria -y

The official docs call the repo PowerTools, but that name is not available here; use lowercase powertools instead. The command normally produces no output.

yum config-manager --set-enabled powertools -y
yum upgrade -y
yum install python3-openstackclient -y
yum install openstack-selinux -y

Installing the services

General steps for installing each service (a generic sketch of the pattern follows this list):

1. Create the service's database and grant privileges

2. Create the service user in Keystone and assign it a role

3. Create the service in Keystone and register its API endpoints

4. Install the service packages and dependencies

5. Edit the configuration files

6. Sync the database

7. Enable and start the service
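
A minimal sketch of this pattern, using a hypothetical service name "myservice", a placeholder password and a placeholder port 1234 (substitute the real names, passwords and URLs from each section below):

#1-3: database, keystone user/role, service entity and endpoint
mysql -u root -p -e "CREATE DATABASE myservice; GRANT ALL PRIVILEGES ON myservice.* TO 'myservice'@'%' IDENTIFIED BY '123456';"
. admin-openrc
openstack user create --domain default --password 123456 myservice
openstack role add --project service --user myservice admin
openstack service create --name myservice --description "My Service" myservice
openstack endpoint create --region RegionOne myservice public http://controller:1234
#4-7: packages, configuration, db sync, service
yum install openstack-myservice -y
#...edit /etc/myservice/myservice.conf...
su -s /bin/sh -c "myservice-manage db_sync" myservice
systemctl enable --now openstack-myservice.service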

Keystone identity service (controller)

1. Create the keystone database

mysql -u root -p
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123456';
flush privileges;
quit;



2. Install the keystone package, Apache, and the Apache WSGI module (Apache needs mod_wsgi to run the Python application)

yum install openstack-keystone httpd python3-mod_wsgi -y

3. Back up the configuration file and strip comments and blank lines to make it easier to edit and read

cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
egrep -Ev '^#|^$' /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf

4. Edit the configuration file (this could be scripted with the openstack-utils package; omitted here)

vi /etc/keystone/keystone.conf

[database]
connection = mysql+pymysql://keystone:123456@controller/keystone
[token]
provider = fernet

5. Sync the database and initialize the Fernet key repositories:

su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

#Check that the keystone database now contains tables
mysql -uroot -p keystone -e 'show tables;'

6. Bootstrap the Identity service:

keystone-manage bootstrap --bootstrap-password 123456 \
  --bootstrap-admin-url http://controller:5000/v3/ \
  --bootstrap-internal-url http://controller:5000/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne
Configure the Apache HTTP server (keystone runs under Apache)
#Edit /etc/httpd/conf/httpd.conf and set the ServerName option to reference the controller node
echo "ServerName controller" >> /etc/httpd/conf/httpd.conf

#Create a link to the /usr/share/keystone/wsgi-keystone.conf file
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

systemctl restart httpd.service
systemctl enable httpd.service
systemctl status httpd.service
#Configure the administrative account
cat >> ~/admin-openrc << EOF
export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
EOF
chmod +x admin-openrc
. admin-openrc
Create domains, projects, users and roles

1. Create a domain

openstack domain create --description "An Example Domain" example

2. Create the service project

openstack project create --domain default   --description "Service Project" service

3. Non-administrative tasks should use an unprivileged project and user, e.g. a demo user:

openstack project create --domain default --description "Demo Project" myproject
openstack user create --domain default --password-prompt myuser

4. Create a user role

openstack role create myrole

5. Add the role to the demo user.

openstack role add --project myproject --user myuser myrole
Verify operation

1. Unset the temporary environment variables

unset OS_AUTH_URL OS_PASSWORD

2. Verify the admin user by requesting a token

openstack --os-auth-url http://controller:5000/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name admin --os-username admin token issue

3. Verify the non-admin (demo) user myuser by requesting a token

openstack --os-auth-url http://controller:5000/v3 \
  --os-project-domain-name Default --os-user-domain-name Default \
  --os-project-name myproject --os-username myuser token issue
Create the OpenStack client environment scripts (source the relevant script in every new shell session before running openstack commands)

1. Create the admin-openrc script

Omitted (shown above)

2. Create the demo-openrc script

cat >> ~/demo-openrc << EOF
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
EOF
chmod +x demo-openrc

3. Verify the script; a token should be returned as shown

. admin-openrc
#   . demo-openrc
openstack token issue


[root@controller ~]# openstack token issue
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                                                   |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2021-06-11T09:40:23+0000                                                                                                                                                                |
| id         | gAAAAABgwyF38PwZLkNBU2u0lzjwU4n3NFcGyGyNCQY7haiT51thYWTP31iMYedVd4NIIgRso0mRb04SQMuum45btZKKLDt2sS3H9Ep8crctMIVUs6gE4WQJaQHGuqNaQMEzFfgm3pACc9I730C9Y821jdLUm1njtNM2vVdegO6Cps6aaCKF1VQ |
| project_id | 6b6fd684a8ef4d4ebe47d5f7a6069985                                                                                                                                                        |
| user_id    | 06bb1201bbed43209262cf7a09e42c1b                                                                                                                                                        |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
[root@controller ~]# 

Glance image service (controller)

1. Create the glance database and grant privileges

mysql -u root -p

CREATE DATABASE glance;

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '123456';

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%'  IDENTIFIED BY '123456';
flush privileges;
quit

1. Create the glance user

. admin-openrc
openstack user create --domain default --password-prompt glance

2. Add the admin role to the glance user in the service project

openstack role add --project service --user glance admin

3. Create the glance service entity

openstack service create --name glance  --description "OpenStack Image" image

4. Create the Image service API endpoints

openstack endpoint create --region RegionOne  image public http://controller:9292

openstack endpoint create --region RegionOne  image internal http://controller:9292

openstack endpoint create --region RegionOne  image admin http://controller:9292

Install and configure the components

1. Install the package

yum install openstack-glance -y

2. Back up the configuration file and strip comments and blank lines

cp -a /etc/glance/glance-api.conf{,.bak} 

egrep -v '^$|#' /etc/glance/glance-api.conf.bak > /etc/glance/glance-api.conf

3. Edit /etc/glance/glance-api.conf

vi /etc/glance/glance-api.conf

#In the [database] section, configure database access
[database]
connection = mysql+pymysql://glance:123456@controller/glance

#In the [glance_store] section, configure the local file system store and the location of image files:

[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

#In the [keystone_authtoken] and [paste_deploy] sections, configure Identity service access

[keystone_authtoken]
www_authenticate_uri  = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = 123456

[paste_deploy]
flavor = keystone

4. Populate the Image service database:

su -s /bin/sh -c "glance-manage db_sync" glance

5. Start the Image service and configure it to start at boot

systemctl enable openstack-glance-api.service
systemctl start openstack-glance-api.service
systemctl status openstack-glance-api.service
Verification
. admin-openrc
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

glance image-create --name "cirros" \
  --file cirros-0.4.0-x86_64-disk.img \
  --disk-format qcow2 --container-format bare \
  --visibility=public

#Your uploaded images are listed (a centos image was uploaded separately in this environment)
glance image-list
[root@controller ~]# glance image-list
+--------------------------------------+--------+
| ID                                   | Name   |
+--------------------------------------+--------+
| 85b6d7d7-a96c-4664-bc87-355f49a28477 | centos |
| 04eda535-0adc-473a-a2d6-a14ab4877b33 | cirros |
+--------------------------------------+--------+
[root@controller ~]# 

Placement service (controller)

Purpose: (before the Stein release, this code lived inside Nova together with the compute REST API, nova-api.) A resource provider can be a compute node, a shared storage pool or an IP allocation pool. The placement service tracks the inventory and usage of each provider. For example, creating an instance consumes CPU and memory from the compute-node resource provider, disk from an external shared-storage resource provider, and IP addresses from an external IP resource provider.

Create the placement database and grant privileges
mysql -uroot -p
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY '123456';
flush privileges;
quit;
Create the placement user, role assignment and API endpoints
openstack user create --domain default --password-prompt placement

openstack role add --project service --user placement admin
openstack service create --name placement   --description "Placement API" placement

openstack endpoint create --region RegionOne   placement public http://controller:8778
openstack endpoint create --region RegionOne   placement internal http://controller:8778
openstack endpoint create --region RegionOne   placement admin http://controller:8778
Install and configure placement

1. Install the placement package

yum install openstack-placement-api -y

2. Edit /etc/placement/placement.conf

cp /etc/placement/placement.conf /etc/placement/placement.conf.bak
egrep -v '^#|^$' /etc/placement/placement.conf.bak > /etc/placement/placement.conf

vi /etc/placement/placement.conf

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = 123456

[placement_database]
connection = mysql+pymysql://placement:123456@controller/placement

3. Populate the placement database, check the tables, and restart httpd

su -s /bin/sh -c "placement-manage db sync" placement
mysql -uroot -p placement  -e 'show tables'
systemctl restart httpd
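
Optionally verify placement before moving on. The status check below ships with the placement package; the second command needs the osc-placement client plugin, which I assume is packaged as python3-osc-placement in this repository — treat it as optional:

placement-status upgrade check
#yum install python3-osc-placement -y
#openstack --os-placement-api-version 1.2 resource class list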

Nova compute service (core component)

Nova on the controller

Create the nova_api, nova and nova_cell0 databases and grant privileges
mysql -u root -p
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY '123456';

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '123456';

flush privileges;
quit
Create the nova user, role assignment and API endpoints
. admin-openrc
openstack user create --domain default --password-prompt nova

openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute

openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
Install and configure the Nova components

nova-api service: accepts and responds to end-user compute API calls. When it receives a request it usually forwards it to other Nova components, such as nova-scheduler, for processing. It manages the lifecycle of virtual machines and exposes the OpenStack API, the Amazon EC2 API and an administrator control API; nova-api is the entry point to Nova. It accepts user requests and places instructions on the message queue, where the corresponding services pick them up and execute them.

nova-compute service (the core of Nova, the component that actually manages virtual machines): a worker daemon responsible for creating and terminating instances. nova-compute brings together CPU, storage and network resources to deploy and manage virtual machines and deliver compute capacity. Its tasks include starting, terminating and rebooting instances, suspending instances, attaching and detaching volumes, retrieving console output, and so on.

nova-scheduler service: decides on which physical host a virtual machine should be placed; it takes instructions from the message queue and executes them.

nova-conductor module: sits between nova-compute and the database and mediates interactions between them, so that nova-compute never accesses the cloud database directly.

nova-novncproxy daemon: provides a proxy for accessing running instances through a VNC connection; it supports the browser-based noVNC client.

1. Install the packages

yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y

2、编辑/etc/nova/nova.conf

#Back up the original file
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
egrep -v '^$|^#' /etc/nova/nova.conf.bak >/etc/nova/nova.conf


vi /etc/nova/nova.conf

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:123456@controller:5672/
my_ip = 172.16.20.2
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api]
auth_strategy = keystone

[api_database]
connection = mysql+pymysql://nova:123456@controller/nova_api

[database]
connection = mysql+pymysql://nova:123456@controller/nova

[glance]
api_servers = http://controller:9292

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 123456

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123456

[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip

3. Sync the databases

su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
#The informational messages printed by this command can be ignored
[root@controller nova]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
--transport-url not provided in the command line, using the value [DEFAULT]/transport_url from the configuration file
--database_connection not provided in the command line, using the value [database]/connection from the configuration file
83dd770f-1cba-45f4-a3cb-1640e266acf3


su -s /bin/sh -c "nova-manage db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
systemctl enable \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service
systemctl start \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service
systemctl status \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service|grep active
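
The Nova control-plane services above all talk to each other through RabbitMQ. As an optional sanity check (assuming RabbitMQ is running locally on the controller, as set up earlier), you can confirm that their queues exist:

rabbitmqctl list_queues name | grep -E 'scheduler|conductor'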

compute

Install and configure the components
yum install openstack-nova-compute -y

#Back up the original file
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
egrep -v '^$|^#' /etc/nova/nova.conf.bak >/etc/nova/nova.conf

vi /etc/nova/nova.conf

[DEFAULT]
#Compute node IP
my_ip = 172.16.20.3
enabled_apis = osapi_compute,metadata
#Allow resizing an instance onto the same host
allow_resize_to_same_host=True
transport_url = rabbit://openstack:123456@controller
use_neutron = True
compute_driver=libvirt.LibvirtDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[filter_scheduler]
enabled_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 123456

[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://172.16.20.2:6080/vnc_auto.html

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123456

#Determine whether your compute node supports hardware acceleration for virtual machines:
#egrep -c '(vmx|svm)' /proc/cpuinfo
#If this command returns one or greater, the node supports hardware acceleration and usually needs no extra configuration.
#If it returns zero, the node does not support hardware acceleration and you must configure libvirt to use QEMU instead of KVM.

#Edit the [libvirt] section of /etc/nova/nova.conf as follows:

[libvirt]
virt_type = qemu

#Start the Compute service and its dependencies, and configure them to start automatically at boot:

systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service


#When adding new compute nodes, you must run nova-manage cell_v2 discover_hosts on the controller node to register them (the command is shown after this snippet). Alternatively, set an appropriate discovery interval in /etc/nova/nova.conf:

[scheduler]
discover_hosts_in_cells_interval = 300
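
#The discovery command itself is run on the controller (it also appears in the verification section below):
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova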

Verification (controller)

[root@controller ~]# openstack compute service list --service nova-compute
+----+--------------+---------+------+---------+-------+----------------------------+
| ID | Binary       | Host    | Zone | Status  | State | Updated At                 |
+----+--------------+---------+------+---------+-------+----------------------------+
|  8 | nova-compute | compute | nova | enabled | up    | 2021-06-15T01:24:59.000000 |
+----+--------------+---------+------+---------+-------+----------------------------+
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting computes from cell 'cell1': c542c069-8b94-428c-a378-02b9f74504a1
Checking host mapping for compute host 'compute': 36b36335-6ddd-415a-80e5-291917110c49
Creating host mapping for compute host 'compute': 36b36335-6ddd-415a-80e5-291917110c49
Found 1 unmapped computes in cell: c542c069-8b94-428c-a378-02b9f74504a1
[root@controller ~]# openstack compute service list
+----+----------------+------------+----------+---------+-------+----------------------------+
| ID | Binary         | Host       | Zone     | Status  | State | Updated At                 |
+----+----------------+------------+----------+---------+-------+----------------------------+
|  1 | nova-conductor | controller | internal | enabled | up    | 2021-06-15T01:25:27.000000 |
|  6 | nova-scheduler | controller | internal | enabled | up    | 2021-06-15T01:25:28.000000 |
|  8 | nova-compute   | compute    | nova     | enabled | up    | 2021-06-15T01:25:30.000000 |
+----+----------------+------------+----------+---------+-------+----------------------------+

[root@controller ~]# openstack catalog list
+-----------+-----------+-----------------------------------------+
| Name      | Type      | Endpoints                               |
+-----------+-----------+-----------------------------------------+
| glance    | image     | RegionOne                               |
|           |           |   public: http://controller:9292        |
|           |           | RegionOne                               |
|           |           |   internal: http://controller:9292      |
|           |           | RegionOne                               |
|           |           |   admin: http://controller:9292         |
|           |           |                                         |
| keystone  | identity  | RegionOne                               |
|           |           |   internal: http://controller:5000/v3/  |
|           |           | RegionOne                               |
|           |           |   admin: http://controller:5000/v3/     |
|           |           | RegionOne                               |
|           |           |   public: http://controller:5000/v3/    |
|           |           |                                         |
| placement | placement | RegionOne                               |
|           |           |   internal: http://controller:8778      |
|           |           | RegionOne                               |
|           |           |   admin: http://controller:8778         |
|           |           | RegionOne                               |
|           |           |   public: http://controller:8778        |
|           |           |                                         |
| nova      | compute   | RegionOne                               |
|           |           |   admin: http://controller:8774/v2.1    |
|           |           | RegionOne                               |
|           |           |   internal: http://controller:8774/v2.1 |
|           |           | RegionOne                               |
|           |           |   public: http://controller:8774/v2.1   |
|           |           |                                         |
+-----------+-----------+-----------------------------------------+

#Add the following configuration to /etc/httpd/conf.d/00-placement-api.conf
vi /etc/httpd/conf.d/00-placement-api.conf

<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>

[root@controller ~]# systemctl restart httpd

[root@controller ~]# nova-status upgrade check
+-------------------------------------------+
| Upgrade Check Results                     |
+-------------------------------------------+
| Check: Cells v2                           |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
| Check: Placement API                      |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
| Check: Ironic Flavor Migration            |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
| Check: Cinder API                         |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
| Check: Policy Scope-based Defaults        |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
| Check: Policy File JSON to YAML Migration |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+
| Check: Older than N-1 computes            |
| Result: Success                           |
| Details: None                             |
+-------------------------------------------+

Neutron networking service (key component)

Network modes:

Networking Option 1: Provider networks

Networking Option 2: Self-service networks (commonly used)

controller

Create the database and grant privileges
mysql -u root -p

CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '123456';

flush privileges;
quit
. admin-openrc
openstack user create --domain default --password-prompt neutron

openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network

openstack endpoint create --region RegionOne network public http://controller:9696

openstack endpoint create --region RegionOne network internal http://controller:9696

openstack endpoint create --region RegionOne network admin http://controller:9696

Install and configure

1. Configure the common components (the Networking common configuration includes the authentication mechanism, message queue and plug-in)

yum install openstack-neutron openstack-neutron-ml2   openstack-neutron-linuxbridge ebtables -y


cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
egrep -v '^$|^#' /etc/neutron/neutron.conf.bak >/etc/neutron/neutron.conf

#Add any section that is missing from the stripped file (the [nova] section is missing here)
vi /etc/neutron/neutron.conf

[DEFAULT]
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:123456@controller
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[database]
connection = mysql+pymysql://neutron:123456@controller/neutron

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 123456

2. Configure the Modular Layer 2 (ML2) plug-in

cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
egrep -v '^$|#' /etc/neutron/plugins/ml2/ml2_conf.ini.bak > /etc/neutron/plugins/ml2/ml2_conf.ini

#In this version the stripped file contains only [DEFAULT]; add the other sections yourself
vi /etc/neutron/plugins/ml2/ml2_conf.ini

[DEFAULT]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security

[ml2_type_flat]
#flat_networks = provider,gongwang
#With '*' below, flat networks of any name can be created from the dashboard. If instead you write 'flat_networks = provider,gongwang', only flat networks named provider and gongwang can be created, and those labels must match the bridge mappings in /etc/neutron/plugins/ml2/linuxbridge_agent.ini:
#[linux_bridge]
#physical_interface_mappings = provider:ens33


flat_networks =*
[ml2_type_vxlan]
vni_ranges = 1:1000

[securitygroup]
enable_ipset = true

3. Configure the Linux bridge agent

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
egrep -v '^$|#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

#Missing sections must be added here as well
vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[DEFAULT]
[linux_bridge]
#The NIC connected to the external (provider) network (PROVIDER_INTERFACE_NAME)
physical_interface_mappings = provider:ens33

[vxlan]
enable_vxlan = true
#The controller node's overlay IP address (OVERLAY_INTERFACE_IP_ADDRESS)
local_ip = 172.16.20.2
l2_population = true

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver


#Set the following Linux kernel parameters to 1
echo 'net.bridge.bridge-nf-call-iptables=1' >>/etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables=1'  >>/etc/sysctl.conf

#Enable bridge filtering support by loading the br_netfilter kernel module
modprobe br_netfilter
sysctl -p

4. Configure the layer-3 agent; the L3 agent provides routing and NAT services for self-service virtual networks

vi /etc/neutron/l3_agent.ini

[DEFAULT]
interface_driver = linuxbridge

5. Configure the DHCP agent, which provides DHCP services for virtual networks.

vi /etc/neutron/dhcp_agent.ini

[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

6. Configure the metadata agent, which provides configuration information such as credentials to instances (the metadata shared secret is set to 123456 here)

vi /etc/neutron/metadata_agent.ini

[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = 123456

7. Configure the Compute service (nova) to use the Networking service

vi /etc/nova/nova.conf

[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
#the neutron user's password
password = 123456
service_metadata_proxy = true
#the metadata shared secret
metadata_proxy_shared_secret = 123456
Finalize the installation
#Create a symbolic link (the Networking service initialization scripts expect /etc/neutron/plugin.ini to point to the ML2 plug-in configuration file)
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

#Sync the database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

#The Compute service configuration was changed, so restart the Compute API service
systemctl restart openstack-nova-api.service

#Start the neutron services
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service  neutron-metadata-agent.service neutron-l3-agent.service

systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service  neutron-metadata-agent.service neutron-l3-agent.service

#Make sure all five services are running
systemctl status neutron-server.service   neutron-linuxbridge-agent.service neutron-dhcp-agent.service   neutron-metadata-agent.service  neutron-l3-agent.service|grep active

compute

1. Configure the common components (authentication mechanism, message queue and plug-in)

yum install openstack-neutron-linuxbridge ebtables ipset -y

#In the [database] section, comment out (do not set) any connection option, because compute nodes do not access the database directly
###Alternatively inspect the stripped file with: grep '^[a-Z]' /etc/neutron/neutron.conf
cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
egrep -v '^$|^#' /etc/neutron/neutron.conf.bak >/etc/neutron/neutron.conf
vi /etc/neutron/neutron.conf

[DEFAULT]
auth_strategy = keystone
transport_url = rabbit://openstack:123456@controller

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp


2. Configure the Linux bridge agent

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
egrep -v '^$|#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini                                                 
[DEFAULT]
[linux_bridge]
#The NIC connected to the external (provider) network (PROVIDER_INTERFACE_NAME)
physical_interface_mappings = provider:ens33

[vxlan]
enable_vxlan = true
#The compute node's overlay IP address (OVERLAY_INTERFACE_IP_ADDRESS)
local_ip = 172.16.20.3
l2_population = true

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

#Set the following Linux kernel parameters to 1
echo 'net.bridge.bridge-nf-call-iptables=1' >>/etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables=1'  >>/etc/sysctl.conf

#Enable bridge filtering support by loading the br_netfilter kernel module
modprobe br_netfilter
sysctl -p

3. Configure the Compute service (nova) to use the Networking service

vi /etc/nova/nova.conf

[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
#the neutron user's password
password = 123456
Finalize the installation

1. Restart the Compute service:

#For the Xena release the following may also be needed:
#mkdir -p /usr/lib/python3.6/site-packages/instances
systemctl restart openstack-nova-compute.service

2. Start the Linux bridge agent and configure it to start at boot:

systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service
systemctl status neutron-linuxbridge-agent.service


Verification (controller)
. admin-openrc
openstack extension list --network
openstack network agent list
[root@controller ~]# openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 0ceb1985-3551-4ae1-ad95-3c8e8b1c314a | Metadata agent     | controller | None              | :-)   | UP    | neutron-metadata-agent    |
| 5cdd4020-1d17-4af9-b2ab-2bc64cd9e45d | L3 agent           | controller | nova              | :-)   | UP    | neutron-l3-agent          |
| 661f0e27-8362-4cd7-8740-a44c1ce32bcf | Linux bridge agent | compute    | None              | :-)   | UP    | neutron-linuxbridge-agent |
| 6bcada94-9959-47fe-b063-1a19ad68ce73 | Linux bridge agent | controller | None              | :-)   | UP    | neutron-linuxbridge-agent |
| 8e6cbbed-53fe-443b-9fba-0205acb44c95 | DHCP agent         | controller | nova              | :-)   | UP    | neutron-dhcp-agent        |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+


Appendix: OVS (Open vSwitch) variant for self-service networking

I. Controller configuration
1. Install the packages
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch ebtables -y

#/etc/neutron/neutron.conf (note: this appendix is from an environment whose controller IP is 172.16.90.247)
[DEFAULT]
core_plugin = ml2
service_plugins = router
auth_strategy = keystone
state_path = /var/lib/neutron
dhcp_agent_notification = True
allow_overlapping_ips = True
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
transport_url = rabbit://openstack:123456@172.16.90.247
[cors]
[database]
connection = mysql+pymysql://neutron:123456@172.16.90.247/neutron
[keystone_authtoken]
www_authenticate_uri = http://172.16.90.247:5000
auth_url = http://172.16.90.247:5000
memcached_servers = 172.16.90.247:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

[nova]
auth_url = http://172.16.90.247:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 123456


[root@controller neutron]# egrep -v '^$|^#' dhcp_agent.ini 
[DEFAULT]
interface_driver = openvswitch
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

[root@controller neutron]# egrep -v '^$|^#' l3_agent.ini 
[DEFAULT]
interface_driver = openvswitch
external_network_bridge = 

[root@controller neutron]# egrep -v '^$|^#' metadata_agent.ini 
[DEFAULT]
nova_metadata_host = 172.16.90.247
metadata_proxy_shared_secret = 123456
[cache]

[root@controller neutron]# egrep -v '^$|^#' /etc/neutron/plugins/ml2/ml2_conf.ini 
[DEFAULT]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true

[root@controller neutron]# egrep -v '^$|^#' /etc/neutron/plugins/ml2/openvswitch_agent.ini 
[DEFAULT]
[agent]
tunnel_types = vxlan
l2_population = True
[ovs]
bridge_mappings = provider:br-provider
local_ip = 172.16.90.247
[securitygroup]
firewall_driver = openvswitch
enable_security_group = true
enable_ipset = true

ovs-vsctl add-br br-provider
ovs-vsctl add-port br-provider ens34

systemctl restart neutron-server.service neutron-openvswitch-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

#####compute
 yum install openstack-neutron-openvswitch ebtables ipset -y

[root@compute neutron]# egrep -v '^$|*#' neutron.conf
[DEFAULT]
auth_strategy = keystone
transport_url = rabbit://openstack:123456@172.16.90.247
[cors]
[database]
[keystone_authtoken]
www_authenticate_uri = http://172.16.90.247:5000
auth_url = http://172.16.90.247:5000
memcached_servers = 172.16.90.247:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

[root@compute neutron]# egrep -v '^$|*#' /etc/neutron/plugins/ml2/openvswitch_agent.ini 
[DEFAULT]
[ovs]
local_ip = 172.16.90.248
[agent]
tunnel_types = vxlan
l2_population = True
[securitygroup]

[root@compute neutron]# egrep -v '^$|*#' /etc/neutron/plugins/ml2/ml2_conf.ini 
[DEFAULT]
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types =*
mechanism_drivers = openvswitch,l2population
extension_drivers = port_security
[ml2_type_vxlan]
vni_ranges = 1:1000

[root@compute neutron]# egrep -v '^$|*#' /etc/nova/nova.conf
[neutron]
auth_url = http://172.16.90.247:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456


systemctl restart openstack-nova-compute.service
systemctl restart neutron-openvswitch-agent.service


Horizon (dashboard) web UI service

(Can run on its own node; here it is installed on the controller. The dashboard is a Python/Django web UI.)

1. Install the package, then edit the dashboard settings file (/etc/openstack-dashboard/local_settings) and set the following:

yum install openstack-dashboard -y
WEBROOT = '/dashboard/'
ALLOWED_HOSTS = ['*']
CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': '172.16.20.2:11211',
    }
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "admin"  ##note: this default role must already exist in Keystone, otherwise the "Project" panel is unusable (see the note after this listing)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
OPENSTACK_HOST = "172.16.20.2"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
#or alternatively:
#OPENSTACK_KEYSTONE_URL = "http://controller:5000/v3"
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
TIME_ZONE = "Asia/Shanghai"
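
If you change OPENSTACK_KEYSTONE_DEFAULT_ROLE to a role that does not exist yet, create it first from the admin CLI; a sketch assuming a hypothetical role named 'user':

. admin-openrc
openstack role create user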



# Allow setting the instance password from Horizon

/etc/openstack-dashboard/local_settings.py
Change:
OPENSTACK_HYPERVISOR_FEATURES = {
...
    'can_set_password': True,
}

/etc/nova/nova.conf
Add under [libvirt]:
inject_password=true

2. Add the following configuration, otherwise the dashboard will report errors

vi /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}
 systemctl restart httpd memcached

Domain: default

Account: admin

Password: 123456

Launch an instance

Create the networks

Provider network creation:
 openstack network create  --share --external \
  --provider-physical-network provider \
  --provider-network-type flat provider-gongwang
  
#--provider-physical-network names the physical network provider; 'provider' here is a label that must match the neutron configuration below. It can be renamed, but both places must stay consistent (a sketch of adding a second label follows this block).
#Corresponding parameters in /etc/neutron/plugins/ml2/ml2_conf.ini and linuxbridge_agent.ini:
[ml2_type_flat]
flat_networks = provider
[linux_bridge]
physical_interface_mappings = provider:ens33
#--provider-network-type flat means the network is a flat network: instances attached to it sit in the same segment as the physical network, with no VLAN tagging.
#The final argument (provider-gongwang) is the name of the new network.
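
A sketch of adding a second flat label, assuming a hypothetical second NIC ens37 that should carry flat networks under the label gongwang (both names are illustrative; the two config files and the network create command must all agree):

#/etc/neutron/plugins/ml2/ml2_conf.ini
#[ml2_type_flat]
#flat_networks = provider,gongwang
#/etc/neutron/plugins/ml2/linuxbridge_agent.ini
#[linux_bridge]
#physical_interface_mappings = provider:ens33,gongwang:ens37

openstack network create --share --external \
  --provider-physical-network gongwang \
  --provider-network-type flat gongwang-net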
openstack subnet create --network provider-gongwang \
  --allocation-pool start=172.16.90.100,end=172.16.90.250 \
  --dns-nameserver 114.114.114.114 --gateway 172.16.90.2 \
  --subnet-range 172.16.90.0/24 provider-gongwang
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano

openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey

#Specify the flavor (vCPUs/RAM), image, network ID, security group and key pair
openstack server create \
--flavor m1.nano \
--image cirros \
--nic net-id=5351ad43-b785-4c89-8433-1be99b4f04af \
--security-group default \
provider-instance2



Launching an instance on a self-service network (layer-3 / VPC-style network)

Self-service networks connect to provider networks through a virtual router that typically performs bidirectional NAT. Each router has at least one interface on a self-service network and a gateway on a provider network.

Note: how do instances on the internal network reach the Internet? Through the router (the router reaches the Internet via its public IP).

1. Create the network

openstack network create selfservice

2. Create the internal subnet for instances (any private range will do)

openstack subnet create --network selfservice \
  --dns-nameserver 8.8.8.8 --gateway 192.168.0.1 \
  --subnet-range 192.168.0.0/24 selfservice

3. Create a router

openstack router create router

4. Add the self-service subnet as an interface on the router

openstack router add subnet router selfservice

5. Set a gateway on the provider (public) network for the router:

openstack router set router --external-gateway provider-gongwang

6. List the ports on the router to determine the gateway IP address on the provider network

 
[root@controller ~]# openstack port list --router router
+--------------------------------------+------+-------------------+------------------------------------------------------------------------------+--------+
| ID                                   | Name | MAC Address       | Fixed IP Addresses                                                           | Status |
+--------------------------------------+------+-------------------+------------------------------------------------------------------------------+--------+
| 43b9be80-6e43-43a3-9eaf-db0253e67a47 |      | fa:16:3e:76:67:e5 | ip_address='172.16.20.230', subnet_id='c5a883a5-a32f-400a-8f19-550b0984ff47' | ACTIVE |
| 81255daa-de1d-44de-9027-6cfc07c30e5a |      | fa:16:3e:17:02:27 | ip_address='192.168.0.1', subnet_id='21d733ec-f1d5-4d1e-a03c-8c25c5c2a7ae'   | ACTIVE |
+--------------------------------------+------+-------------------+------------------------------------------------------------------------------+--------+

Make sure you can ping the 'public' gateway address (172.16.20.230)

7. Create the instance

#Look up the VPC network ID
openstack network list
[root@localhost ~]# openstack network list
+--------------------------------------+-----------------+--------------------------------------+
| ID                                   | Name            | Subnets                              |
+--------------------------------------+-----------------+--------------------------------------+
| 2c07f7bb-e60b-4426-b530-d22135327109 | selfservice-vpc | 21d733ec-f1d5-4d1e-a03c-8c25c5c2a7ae |
| 5351ad43-b785-4c89-8433-1be99b4f04af | gongwang        | c5a883a5-a32f-400a-8f19-550b0984ff47 |
+--------------------------------------+-----------------+--------------------------------------+

openstack server create --flavor m1.nano --image cirros \
  --nic net-id=2b7ca5e0-9700-439e-8b4e-b40aa617ea0a --security-group ssh \
  --key-name mykey selfservice-vpc1

#At this point the instance only has an internal IP; allocate a floating IP and attach it to the instance
openstack floating ip create provider-gongwang
openstack server add floating ip selfservice-vpc1 172.16.90.245


Swift

(Object storage service. The proxy must be installed on the controller; the storage nodes are up to you. I only have two machines here, so the storage node is the compute node.)
controller node
openstack user create --domain default --password-prompt swift

openstack role add --project service --user swift admin
openstack service create --name swift --description "OpenStack Object Storage" object-store

openstack endpoint create --region RegionOne object-store public http://172.16.20.2:8080/v1/AUTH_%\(project_id\)s
openstack endpoint create --region RegionOne object-store internal http://172.16.20.2:8080/v1/AUTH_%\(project_id\)s
openstack endpoint create --region RegionOne object-store admin http://172.16.20.2:8080/v1

yum install -y openstack-swift-proxy python3-swiftclient python3-keystoneclient python3-keystonemiddleware memcached

curl -o /etc/swift/proxy-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/proxy-server.conf-sample

vi /etc/swift/proxy-server.conf

[DEFAULT]
bind_port = 8080
user = swift
swift_dir = /etc/swift

[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken s3api s3token keystoneauth container-quotas account-quotas bulk slo dlo versioned_writes proxy-logging proxy-server
#proxy-logging cache authtoken s3api s3token keystoneauth bulk slo proxy-logging proxy-server
[app:proxy-server]
use = egg:swift#proxy
account_autocreate = True
[filter:s3api]
use = egg:swift#s3api

[filter:keystoneauth]
use = egg:swift#keystoneauth
#the admin and tenant (project) roles created earlier in keystone
operator_roles = admin,myrole

[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = swift
password = openstack23
delay_auth_decision = True

[filter:cache]
use = egg:swift#memcache
memcache_servers = controller:11211
Storage node

1. Install the supporting utility packages:

yum install xfsprogs rsync -y

2. Format the storage devices as XFS (three partitions of /dev/sdc in this setup); -f overwrites any existing filesystem

mkfs.xfs -f /dev/sdc1
mkfs.xfs -f /dev/sdc2
mkfs.xfs -f /dev/sdc3

3. Create the mount point directory structure

mkdir -p /srv/node/sdc1
mkdir -p /srv/node/sdc2
mkdir -p /srv/node/sdc3

4. Edit /etc/fstab and add the filesystem entries (UUIDs are used here; device paths also work)

blkid   #look up the UUIDs
vi /etc/fstab
UUID="451dfd77-0591-4c2e-9ad4-e7b5be131bdb" /srv/node/sdc1 xfs noatime 0 2
UUID="2164b144-17e3-410b-b8df-40342a8d75ac" /srv/node/sdc2 xfs noatime 0 2
UUID="987126dd-98d0-4b87-a0ec-432e0cec06d4" /srv/node/sdc3 xfs noatime 0 2

5. Mount the devices:

mount /srv/node/sdc1
mount /srv/node/sdc2
mount /srv/node/sdc3

6. Create and edit the configuration file /etc/rsyncd.conf

uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
#MANAGEMENT_INTERFACE_IP_ADDRESS is the storage node's IP
address = 172.16.20.3

[account]
max connections = 2
path = /srv/node/
read only = False
lock file = /var/lock/account.lock

[container]
max connections = 2
path = /srv/node/
read only = False
lock file = /var/lock/container.lock

[object]
max connections = 2
path = /srv/node/
read only = False
lock file = /var/lock/object.lock

#CentOS 8 is missing some of these files, and the service fails to start without them
#Unit file 1
cat > /usr/lib/systemd/system/rsyncd.service << EOF 
[Unit]
Description=fast remote file copy program daemon
ConditionPathExists=/etc/rsyncd.conf

[Service]
EnvironmentFile=/etc/sysconfig/rsyncd
ExecStart=/usr/bin/rsync --daemon --no-detach "$OPTIONS"

[Install]
WantedBy=multi-user.target
EOF

#Environment file 2
cat > /etc/sysconfig/rsyncd << EOF
OPTIONS=""
EOF

7. Start the rsyncd service and configure it to start at boot:

systemctl enable rsyncd.service
systemctl start rsyncd.service
8. Install and configure the components (storage node)
yum install openstack-swift-account openstack-swift-container openstack-swift-object -y
curl -o /etc/swift/account-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/account-server.conf-sample

curl -o /etc/swift/container-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/container-server.conf-sample

curl -o /etc/swift/object-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/object-server.conf-sample

Edit /etc/swift/account-server.conf and complete the following:

vi /etc/swift/account-server.conf
[DEFAULT]
bind_ip = 172.16.20.3   #storage node IP
bind_port = 6202
user = swift
swift_dir = /etc/swift
devices = /srv/node
mount_check = True

[pipeline:main]
pipeline = healthcheck recon account-server

[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift

编辑/etc/swift/container-server.conf文件并完成以下操作:

vi /etc/swift/container-server.conf
[DEFAULT]
bind_ip = 172.16.20.3
bind_port = 6201
user = swift
swift_dir = /etc/swift
devices = /srv/node
mount_check = True

[pipeline:main]
pipeline = healthcheck recon container-server

[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift

编辑/etc/swift/object-server.conf文件并完成以下操作:

vi /etc/swift/object-server.conf
[DEFAULT]
bind_ip = 172.16.20.3
bind_port = 6200
user = swift
swift_dir = /etc/swift
devices = /srv/node
mount_check = True

[pipeline:main]
pipeline = healthcheck recon object-server

[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
recon_lock_path = /var/lock

确保挂载点目录结构的正确所有权

chown -R swift:swift /srv/node

创建recon目录并确保其拥有正确的所有权:

mkdir -p /var/cache/swift
chown -R root:swift /var/cache/swift
chmod -R 775 /var/cache/swift
创建容器 ring(controller)

1、切换到/etc/swift目录。创建基础account.builder文件:

cd /etc/swift
#create 10 3 1:10 为 part power(即 2^10 个分区),3 为副本数,1 为 min_part_hours;副本数为 3 时,ring 里至少要添加 3 个设备才能正常 rebalance
swift-ring-builder account.builder create 10 3 1

#第一次执行会报错:/etc/swift/swift.conf 里默认的 swift_hash_path_suffix = HASH_PATH_PREFIX 只是占位符,必须替换成实际的哈希串(生产环境请使用随机且保密的值),这里演示直接写成 swift。
[root@controller swift]# vi swift.conf
[swift-hash]
swift_hash_path_suffix = swift
swift_hash_path_prefix = swift

#重新执行,没有报错
swift-ring-builder account.builder create 10 3 1
swift-ring-builder container.builder create 10 3 1
swift-ring-builder object.builder create 10 3 1

2、将每个存储节点的存储设备添加到 ring 中,分别是account(对应配置文件端口:6202)、container(6201)、object(6200)

swift-ring-builder account.builder add --region 1 --zone 1 --ip 172.16.20.3 --port 6202 --device sdc1 --weight 100
swift-ring-builder account.builder add --region 1 --zone 1 --ip 172.16.20.3 --port 6202 --device sdc2 --weight 100
swift-ring-builder account.builder add --region 1 --zone 1 --ip 172.16.20.3 --port 6202 --device sdc3 --weight 100

swift-ring-builder container.builder add --region 1 --zone 1 --ip 172.16.20.3 --port 6201 --device sdc1 --weight 100
swift-ring-builder container.builder add --region 1 --zone 1 --ip 172.16.20.3 --port 6201 --device sdc2 --weight 100
swift-ring-builder container.builder add --region 1 --zone 1 --ip 172.16.20.3 --port 6201 --device sdc3 --weight 100
swift-ring-builder object.builder add --region 1 --zone 1 --ip 172.16.20.3 --port 6200 --device sdc1 --weight 100
swift-ring-builder object.builder add --region 1 --zone 1 --ip 172.16.20.3 --port 6200 --device sdc2 --weight 100
swift-ring-builder object.builder add --region 1 --zone 1 --ip 172.16.20.3 --port 6200 --device sdc3 --weight 100
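
设备添加完成后,可以直接查看 builder 文件,确认设备、权重等信息无误后再做 rebalance,例如:

swift-ring-builder account.builder
swift-ring-builder container.builder
swift-ring-builder object.builder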

3、重新平衡,会生成3个包account.ring.gz,container.ring.gz、object.ring.gz,传至存储节点

swift-ring-builder account.builder rebalance
swift-ring-builder container.builder rebalance
swift-ring-builder object.builder rebalance

scp account.ring.gz container.ring.gz object.ring.gz root@172.16.20.3:/etc/swift
完成安装(controller)

1、获取配置文件

curl -o /etc/swift/swift.conf https://opendev.org/openstack/swift/raw/branch/master/etc/swift.conf-sample

2、编辑/etc/swift/swift.conf文件

vi /etc/swift/swift.conf
[swift-hash]
swift_hash_path_suffix = openstackswift
swift_hash_path_prefix = openstackswift

[storage-policy:0]
name = Policy-0
default = yes

3、将该配置文件复制到每个对象存储节点的 /etc/swift 目录,并修改权限

scp /etc/swift/swift.conf root@172.16.20.3:/etc/swift
#控制节点和所有存储节点都要执行一次
chown -R root:swift /etc/swift

4、启动controller节点的服务

systemctl enable openstack-swift-proxy.service memcached.service
systemctl start openstack-swift-proxy.service memcached.service

5、启动存储节点的服务,并将它们配置为在系统启动时启动

###acconut
systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service openstack-swift-account-reaper.service openstack-swift-account-replicator.service

systemctl start openstack-swift-account.service openstack-swift-account-auditor.service openstack-swift-account-reaper.service openstack-swift-account-replicator.service

systemctl status openstack-swift-account.service openstack-swift-account-auditor.service   openstack-swift-account-reaper.service openstack-swift-account-replicator.service|grep active

###container
systemctl enable openstack-swift-container.service openstack-swift-container-auditor.service openstack-swift-container-replicator.service openstack-swift-container-updater.service

systemctl start openstack-swift-container.service openstack-swift-container-auditor.service openstack-swift-container-replicator.service openstack-swift-container-updater.service

systemctl status openstack-swift-container.service   openstack-swift-container-auditor.service openstack-swift-container-replicator.service   openstack-swift-container-updater.service|grep active

###object
systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service openstack-swift-object-replicator.service openstack-swift-object-updater.service

systemctl start openstack-swift-object.service openstack-swift-object-auditor.service openstack-swift-object-replicator.service openstack-swift-object-updater.service

systemctl status openstack-swift-object.service openstack-swift-object-auditor.service   openstack-swift-object-replicator.service openstack-swift-object-updater.service|grep active
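
全部服务启动后,可以在控制节点做一次简单的端到端验证(需先加载 keystone 凭据;容器名 test-container 和上传的文件仅为演示):

. admin-openrc
swift stat
openstack container create test-container
openstack object create test-container /etc/hosts
openstack object list test-container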
如何增加swiftloopback的大小?
# backup first
cp -a /srv/node/swiftloopback /srv/node/swiftloopback.bak
umount /srv/node/swiftloopback
systemctl list-unit-files | grep enabled|grep swift|awk '{print $1}'|while read service;do systemctl stop $service; done
# add xxx M
dd if=/dev/zero bs=1MiB of=/srv/loopback-device/swiftloopback conv=notrunc oflag=append count=xxx
e2fsck -f /srv/loopback-device/swiftloopback
resize2fs /srv/loopback-device/swiftloopback
mount -a
systemctl list-unit-files | grep enabled|grep swift|awk '{print $1}'|while read service;do systemctl restart $service; done
swift管理访问

https://docs.openstack.org/zh_CN/user-guide/cli-swift-manage-access-swift.html

用户具有账户角色。例如,一个具有管理员角色的用户对所有容器和对象拥有完全访问权限。可以在容器级别设置访问控制列表(ACLs),具体通过设置 X-Container-Read 和 X-Container-Write 请求头分别提供读、写权限。

To give a user read access, use the swift post command with the -r parameter. To give a user write access, use the -w parameter.

以下是read容器的ACLs的示例:

带有任意HTTP referer请求头的请求可以读取容器内容:

$ swift post CONTAINER -r ".r:*"

带有任意HTTP referer请求头的请求可以读取并列出容器内容:

$ swift post CONTAINER -r ".r:*,.rlistings"

用于读取容器内容的合法的HTTP referer 请求头列表:

$ swift post CONTAINER -r ".r:openstack.example.com,.r:swift.example.com,.r:storage.example.com"

用于读取容器内容的非法的HTTP referer 请求头列表:

$ swift post CONTAINER -r ".r:*,.r:-openstack.example.com,.r:-swift.example.com,.r:-storage.example.com"

project1中的所有用户都可以读取容器内容:

$ swift post CONTAINER -r "project1:*"

project1中的User1可以读取容器内容:

$ swift post CONTAINER -r "project1:user1"

允许读取容器内容用户和项目:

$ swift post CONTAINER -r "project1:user1,project1:user2,project3:*,project4:user1"

以下是容器的 write ACLs 示例:

project1中的所有用户都可以写入容器内容:

$ swift post CONTAINER -w "project1:*"

project1中的User1可以写入容器内容:

$ swift post CONTAINER -w "project1:user1"

允许写入容器的特定用户和项目的列表:

$ swift post CONTAINER -w "project1:user1,project1:user2,project3:*,project4:user1"
有关s3api

https://docs.openstack.org/swift/latest/middleware.html

亚马逊对象存储连接(参照华为云)

https://support.huaweicloud.com/api-dgc/dgc_02_0279.html

介绍

通过亚马逊对象存储连接,可以对亚马逊对象存储S3抽取文件,支持CSV、JSON和二进制格式。

连接样例
{
  "links": [
    {
      "link-config-values": {
        "configs": [
          {
            "inputs": [
              {
                "name": "linkConfig.storageType",
                "value": "S3"
              },
              {
                "name": "linkConfig.accessKey",
                "value": "AKIAIPRxxxxxHYWEGDWQ"
              },
              {
                "name": "linkConfig.securityKey",
                "value": "Add password here"
              }
            ],
            "name": "linkConfig"
          }
        ]
      },
      "name": "thirdpartylink",
      "connector-name": "thirdparty-obs-connector"
    }
  ]
}

cinder

controller

mysql -u root -p

CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '123456';

flush privileges;
quit
. admin-openrc
openstack user create --domain default --password-prompt cinder

openstack role add --project service --user cinder admin
openstack service create --name cinderv2   --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3   --description "OpenStack Block Storage" volumev3

openstack endpoint create --region RegionOne   volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne   volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne   volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne   volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne   volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne   volumev3 admin http://controller:8776/v3/%\(project_id\)s
yum install openstack-cinder -y
cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
egrep -v '^$|^#' /etc/cinder/cinder.conf.bak >/etc/cinder/cinder.conf

vi /etc/cinder/cinder.conf
[DEFAULT]
default_volume_type = lvm-HHD
my_ip = 172.16.20.2
auth_strategy = keystone
transport_url = rabbit://openstack:123456@controller

[database]
connection = mysql+pymysql://cinder:123456@controller/cinder

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 123456


[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

vi /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne

su -s /bin/sh -c "cinder-manage db sync" cinder
systemctl restart openstack-nova-api.service
systemctl restart httpd memcached

systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl status openstack-cinder-api.service openstack-cinder-scheduler.service

compute(存储节点)

LVM
yum install lvm2 device-mapper-persistent-data -y
systemctl list-unit-files |grep lvm

#确认磁盘内没有数据或者是不需要的数据时执行,命令清空磁盘:mkfs.xfs -f /dev/sdd

pvcreate /dev/sdd
vgcreate cinder-volumes /dev/sdd
#vgcreate cinder-ssd /dev/sdb
#vgcreate cinder-sata /dev/sdc

vi /etc/lvm/lvm.conf
devices {
#filter 只接受作为 LVM PV 的磁盘,其余全部拒绝;本文实际使用的是 /dev/sdd(可选再加 sdb、sdc)
filter = [ "a/sdb/", "a/sdc/", "a/sdd/", "r/.*/"]

#如果您的存储节点在操作系统磁盘上使用 LVM,您还必须将关联的设备添加到过滤器中。例如,如果/dev/sda设备包含操作系统:
#filter = [ "a/sda/", "a/sdb/", "r/.*/"]

#同样,如果您的计算节点在操作系统磁盘上使用 LVM,您还必须修改/etc/lvm/lvm.conf这些节点上文件中的过滤器 以仅包含操作系统磁盘。例如,如果/dev/sda 设备包含操作系统:
#filter = [ "a/sda/", "r/.*/"]


yum install openstack-cinder targetcli python3-keystone -y

cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
egrep -v '^$|^#' /etc/cinder/cinder.conf.bak >/etc/cinder/cinder.conf

vi /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
auth_strategy = keystone
my_ip = 172.16.90.201
#enabled_backends = lvm
enabled_backends = lvm,ssd,sata
glance_api_servers = http://controller:9292

[database]
connection = mysql+pymysql://cinder:123456@controller/cinder

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 123456

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm
volume_backend_name = lvm

[ssd]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-ssd
target_protocol = iscsi
target_helper = lioadm
volume_backend_name = ssd

[sata]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-sata
target_protocol = iscsi
target_helper = lioadm
volume_backend_name = sata

备份服务(存储节点)
vi /etc/cinder/cinder.conf
backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver
backup_swift_url = http://172.16.20.2:8080/v1/AUTH_
backup_swift_auth = per_user
backup_swift_auth_version = 1
backup_swift_user = swift
backup_swift_key = 123456
backup_swift_container = volumebackups
backup_swift_object_size = 52428800 
backup_swift_retry_attempts = 3
backup_swift_retry_backoff = 2
backup_compression_algorithm = zlib

替换SWIFT_URL为对象存储服务的 URL。可以通过显示对象存储 API 端点找到 URL:
openstack catalog show object-store
补:
#lsblk -d -o name,rota
0 为 SSD,1 为 HDD
systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service
systemctl enable openstack-cinder-backup.service
systemctl start openstack-cinder-backup.service
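
服务启动后,可在控制节点确认 cinder-scheduler、cinder-volume、cinder-backup 均为 up,并创建一个测试卷和备份验证链路(卷名、大小仅为演示):

. admin-openrc
openstack volume service list
openstack volume create --size 1 test-vol
openstack volume list
openstack volume backup create --name test-vol-bak test-vol
openstack volume backup list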

卷类型

(原文此处为 Dashboard 创建卷类型的截图,图片已失效)

#在存储节点 cinder.conf 各后端里配置的
volume_backend_name = lvm
#volume_backend_name = ssd
#volume_backend_name = sata
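
原文截图中的操作也可以用命令行完成:先创建卷类型,再把 volume_backend_name 作为 extra spec 绑定到对应后端(类型名 lvm、ssd 仅为示例):

openstack volume type create lvm
openstack volume type set --property volume_backend_name=lvm lvm
openstack volume type create ssd
openstack volume type set --property volume_backend_name=ssd ssd
#创建卷时指定类型,即可调度到对应后端
openstack volume create --size 1 --type ssd test-ssd-vol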

(原文此处为 Dashboard 卷类型配置相关截图,图片已失效)

ceph
mkfs.xfs /dev/sdb
mkfs.xfs /dev/sdc

ssh-keygen -t rsa
ssh-copy-id controller
ssh-copy-id compute

git clone -b stable-5.0 https://github.com/ceph/ceph-ansible.git
yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
yum install -y python3-netaddr
yum install -y ansible

vi /etc/ansible/ansible.cfg
host_key_checking = False

vi /etc/ansible/hosts
[mons]
172.16.20.2
172.16.90.201

[osds]
172.16.20.2
172.16.90.201

[mgrs]
172.16.20.2
172.16.90.201

[rgws]
172.16.20.2
172.16.90.201

[clients]
172.16.20.2

cd ceph-ansible/group_vars/
cp all.yml.sample all.yml
vi all.yml

---
dummy:
ceph_release_num:
  octopus: 15
cluster: ceph
mon_group_name: mons
osd_group_name: osds
rgw_group_name: rgws
mds_group_name: mdss
client_group_name: clients
mgr_group_name: mgrs
configure_firewall: false
ceph_origin: repository
ceph_repository: community
ceph_mirror: https://mirrors.aliyun.com/ceph/
ceph_stable_key: https://mirrors.aliyun.com/ceph/keys/release.asc
ceph_stable_release: octopus
ceph_stable_repo: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}"
cephx: true
copy_admin_key: true
monitor_interface: ens33
monitor_address_block: 172.16.90.0/24
ip_version: ipv4
public_network: 172.16.90.0/24
cluster_network: 172.16.90.0/24
osd_objectstore: bluestore
osd_auto_discovery: true
radosgw_civetweb_port: 8080
radosgw_civetweb_num_threads: 512
radosgw_interface: ens33
dashboard_enabled: false

cat all.yml|egrep -v '^$|^#'
for i in {mons,osds,mgrs,mdss};do cp $i.yml.sample $i.yml;done
cd ..
ansible-playbook -i /etc/ansible/hosts site.yml.sample
cat group_vars/all.yml|egrep -v '^$|^#'
vi group_vars/all.yml
cat group_vars/all.yml|egrep -v '^$|^#'
ansible-playbook -i /etc/ansible/hosts site.yml.sample


ceph对接glance,nova,cinder

#openstack所有节点
yum install -y python3-rbd
yum install -y ceph-common

#在ceph的主节点创建三个POOL(合理取值)
ceph osd pool create volumes 128
ceph osd pool create images 128
ceph osd pool create vms 128

#初始化POOL
rbd pool init volumes
rbd pool init images
rbd pool init vms

#将ceph的配置文件导入到云平台的openstack的所有节点
#在ceph集群的主节点上执行
ssh root@172.16.100.2  tee /etc/ceph/ceph.conf </etc/ceph/ceph.conf
ssh root@172.16.100.3  tee /etc/ceph/ceph.conf </etc/ceph/ceph.conf

#创建ceph用户和密钥(ceph主节点)
#这里先更新caps,以防报错
ceph auth caps client.glance mon 'profile rbd' osd 'profile rbd pool=images'
ceph auth get-or-create client.glance mon 'profile rbd' osd 'profile rbd pool=images'

ceph auth caps client.cinder mon 'profile rbd' osd 'profile rbd pool=volumes,profile rbd pool=vms,profile rbd-read-only pool=images'
ceph auth get-or-create client.cinder mon 'profile rbd' osd 'profile rbd pool=volumes,profile rbd pool=vms,profile rbd-read-only pool=images'

ceph auth caps client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups'
ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups'

#在ceph集群中写入文件到openstack节点(这里演示,只用了两个密钥,后续可以一对一密钥设置)(ceph主节点)
#要对应服务所在位置,例:我的glance在控制节点,就传到控制节点上
ceph auth get-or-create client.glance | ssh root@172.16.100.2  tee /etc/ceph/ceph.client.glance.keyring
ssh root@172.16.100.2 chown glance:glance /etc/ceph/ceph.client.glance.keyring
ceph auth get-or-create client.cinder | ssh root@172.16.100.3  tee /etc/ceph/ceph.client.cinder.keyring
ssh root@172.16.100.3  chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
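
关于上面 ceph osd pool create 的 PG 数“合理取值”,常用的经验公式是 PG ≈ (OSD 数 × 100) / 副本数,再向上取最接近的 2 的幂(例如 6 个 OSD、3 副本:6×100/3=200,取 256;本文的 128 适合更小的测试集群)。可以这样核对当前取值:

ceph osd pool get volumes pg_num
ceph osd pool get volumes size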

ceph对接glance
#修改/etc/glance/glance-api.conf文件
[DEFAULT]
show_image_direct_url = True
[glance_store]
#注释原有的位置
#stores = file,http
#default_store = file
#filesystem_store_datadir = /var/lib/glance/images/
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8

systemctl restart openstack-glance-api.service
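
重启后可以上传一个 raw 格式镜像,确认它真的写进了 ceph 的 images 池(镜像文件与名称仅为示例):

openstack image create --disk-format raw --container-format bare --file cirros.raw cirros-raw
rbd ls images
#能看到与镜像 ID 同名的 rbd 卷即说明对接成功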
ceph对接nova
#生成一个uuid
[root@compute ceph]# uuidgen
1b70a61f-c951-42e3-9174-78fd58724719
#查看密钥
[root@compute ceph]# cat ceph.client.cinder.keyring 
[client.cinder]
	key = AQB9bDVh0HLOOhAASHAWokyBdpiZwoC44VD1UA==

#编辑一个secret.xml文档 
<secret ephemeral='no' private='no'>
  <uuid>1b70a61f-c951-42e3-9174-78fd58724719</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret> 
virsh secret-define --file secret.xml

virsh secret-set-value --secret 1b70a61f-c951-42e3-9174-78fd58724719 --base64 AQB9bDVh0HLOOhAASHAWokyBdpiZwoC44VD1UA==
#查看是否生成标识,有则忽略以上报错
[root@compute ceph]# virsh secret-list
 UUID                                   Usage
-------------------------------------------------------------------
 545ec73d-06b8-462e-8494-cbf1317dbc1a   ceph client.cinder secret

##添加计算节点的ceph.conf
vi /etc/ceph/ceph.conf
[client]
rbd cache = true
rbd cache writethrough until flush = true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20
[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring

#修改配置文件
vi /etc/nova/nova.conf
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 1b70a61f-c951-42e3-9174-78fd58724719
disk_cachemodes="network=writeback"

systemctl restart libvirtd.service openstack-nova-compute.service
ceph对接cinder
vi /etc/cinder/cinder.conf
[DEFAULT]
enabled_backends = ceph

[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
#用的上面的uuid
rbd_secret_uuid = 1b70a61f-c951-42e3-9174-78fd58724719


systemctl restart openstack-cinder-volume.service
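
重启 cinder-volume 后,创建一个测试卷并确认它出现在 volumes 池中(卷名仅为示例):

openstack volume create --size 1 ceph-test-vol
rbd ls volumes
#正常会看到 volume-<卷ID> 形式的 rbd 卷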

zun

controller

mysql -uroot -p

CREATE DATABASE zun;
GRANT ALL PRIVILEGES ON zun.* TO 'zun'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON zun.* TO 'zun'@'%' IDENTIFIED BY '123456';

flush privileges;
quit
. admin-openrc
openstack user create --domain default --password-prompt zun

openstack role add --project service --user zun admin
openstack service create --name zun --description "Container Service" container

openstack endpoint create --region RegionOne container public http://controller:9517/v1
openstack endpoint create --region RegionOne container internal http://controller:9517/v1
openstack endpoint create --region RegionOne container admin http://controller:9517/v1

#Docker libnetwork 驱动程序,它使用Neutron 提供网络服务

openstack user create --domain default --password-prompt kuryr

openstack role add --project service --user kuryr admin

groupadd --system zun
useradd --home-dir "/var/lib/zun" --create-home --system --shell /bin/false -g zun zun
mkdir -p /etc/zun
chown zun:zun /etc/zun

yum install python3-pip git python3-devel libffi-devel gcc openssl-devel -y
cd /var/lib/zun
git clone -b stable/victoria https://opendev.org/openstack/zun.git
chown -R zun:zun zun
cd zun
#使用国内pip/pip3源
mkdir ~/.pip
vim ~/.pip/pip.conf
[global]
index-url = http://mirrors.aliyun.com/pypi/simple/
[install]
trusted-host=mirrors.aliyun.com

pip3 install -r requirements.txt
python3 setup.py install

#生成示例配置文件
su -s /bin/sh -c "oslo-config-generator --config-file etc/zun/zun-config-generator.conf" zun
su -s /bin/sh -c "cp etc/zun/zun.conf.sample /etc/zun/zun.conf" zun
#复制 api-paste.ini
su -s /bin/sh -c "cp etc/zun/api-paste.ini /etc/zun" zun

cp /etc/zun/zun.conf /etc/zun/zun.conf.bak
egrep -v '^$|^#' /etc/zun/zun.conf.bak >/etc/zun/zun.conf
vi /etc/zun/zun.conf
[DEFAULT]
transport_url = rabbit://openstack:123456@controller

[api]
host_ip = 172.16.20.2
port = 9517

[database]
connection = mysql+pymysql://zun:123456@controller/zun

[keystone_auth]
memcached_servers = controller:11211
www_authenticate_uri = http://controller:5000
project_domain_name = default
project_name = service
user_domain_name = default
password = 123456
username = zun
auth_url = http://controller:5000
auth_type = password
auth_version = v3
auth_protocol = http
service_token_roles_required = True
endpoint_type = internalURL

[keystone_authtoken]
memcached_servers = controller:11211
www_authenticate_uri = http://controller:5000
project_domain_name = default
project_name = service
user_domain_name = default
password = 123456
username = zun
auth_url = http://controller:5000
auth_type = password
auth_version = v3
auth_protocol = http
service_token_roles_required = True
endpoint_type = internalURL

[oslo_concurrency]
lock_path = /var/lib/zun/tmp

[oslo_messaging_notifications]
driver = messaging

[websocket_proxy]
wsproxy_host = 172.16.20.2
wsproxy_port = 6784
base_url = ws://controller:6784/

chown zun:zun /etc/zun/zun.conf
su -s /bin/sh -c "zun-db-manage upgrade" zun

vi /etc/systemd/system/zun-api.service
[Unit]
Description = OpenStack Container Service API

[Service]
ExecStart = /usr/local/bin/zun-api
User = zun

[Install]
WantedBy = multi-user.target

vi /etc/systemd/system/zun-wsproxy.service
[Unit]
Description = OpenStack Container Service Websocket Proxy

[Service]
ExecStart = /usr/local/bin/zun-wsproxy
User = zun

[Install]
WantedBy = multi-user.target

systemctl enable zun-api zun-wsproxy
systemctl start zun-api zun-wsproxy
systemctl status zun-api zun-wsproxy

compute

1、安装docker


卸载旧版本docker,
yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine

yum install -y yum-utils
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum-config-manager --enable docker-ce-nightly
yum install docker-ce docker-ce-cli containerd.io -y

systemctl start docker
systemctl enable docker



2、安装kuryr-libnetwork

groupadd --system kuryr
useradd --home-dir "/var/lib/kuryr" --create-home --system --shell /bin/false -g kuryr kuryr
mkdir -p /etc/kuryr
chown kuryr:kuryr /etc/kuryr

cd /var/lib/kuryr
git clone -b stable/victoria https://opendev.org/openstack/kuryr-libnetwork.git
chown -R kuryr:kuryr kuryr-libnetwork
cd kuryr-libnetwork
pip3 install -r requirements.txt
python3 setup.py install

su -s /bin/sh -c "./tools/generate_config_file_samples.sh" kuryr
su -s /bin/sh -c "cp etc/kuryr.conf.sample /etc/kuryr/kuryr.conf" kuryr

cp /etc/kuryr/kuryr.conf /etc/kuryr/kuryr.conf.bak
egrep -v '^$|^#' /etc/kuryr/kuryr.conf.bak >/etc/kuryr/kuryr.conf
vi /etc/kuryr/kuryr.conf

[DEFAULT]
bindir = /usr/local/libexec/kuryr

[neutron]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
username = kuryr
user_domain_name = default
password = 123456
project_name = service
project_domain_name = default
auth_type = password

vi /etc/systemd/system/kuryr-libnetwork.service

[Unit]
Description = Kuryr-libnetwork - Docker network plugin for Neutron

[Service]
ExecStart = /usr/local/bin/kuryr-server --config-file /etc/kuryr/kuryr.conf
CapabilityBoundingSet = CAP_NET_ADMIN
AmbientCapabilities = CAP_NET_ADMIN

[Install]
WantedBy = multi-user.target

systemctl enable kuryr-libnetwork
systemctl start kuryr-libnetwork
systemctl status kuryr-libnetwork
systemctl restart docker
systemctl status docker

#创建docker网络
docker network create --driver kuryr --ipam-driver kuryr \
      --subnet 10.10.0.0/16 --gateway=10.10.0.1 test_net
docker network ls
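#kuryr 创建的 docker 网络对应一个 neutron 网络,可在控制节点加载凭据后确认(10.10.0.0/16 即上面 test_net 的网段,仅为示例)
openstack subnet list | grep 10.10.0.0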

groupadd --system zun
useradd --home-dir "/var/lib/zun" --create-home --system --shell /bin/false -g zun zun
mkdir -p /etc/zun
chown zun:zun /etc/zun
mkdir -p /etc/cni/net.d
chown zun:zun /etc/cni/net.d

yum install python3-pip git python3-devel libffi-devel gcc openssl-devel numactl

cd /var/lib/zun
git clone -b stable/victoria https://opendev.org/openstack/zun.git
chown -R zun:zun zun
cd zun
pip3 install -r requirements.txt
python3 setup.py install

su -s /bin/sh -c "oslo-config-generator --config-file etc/zun/zun-config-generator.conf" zun
su -s /bin/sh -c "cp etc/zun/zun.conf.sample /etc/zun/zun.conf" zun
su -s /bin/sh -c "cp etc/zun/rootwrap.conf /etc/zun/rootwrap.conf" zun
su -s /bin/sh -c "mkdir -p /etc/zun/rootwrap.d" zun
su -s /bin/sh -c "cp etc/zun/rootwrap.d/* /etc/zun/rootwrap.d/" zun
su -s /bin/sh -c "cp etc/cni/net.d/* /etc/cni/net.d/" zun

echo "zun ALL=(root) NOPASSWD: /usr/local/bin/zun-rootwrap /etc/zun/rootwrap.conf *" | sudo tee /etc/sudoers.d/zun-rootwrap
cp /etc/zun/zun.conf /etc/zun/zun.conf.bak
egrep -v '^$|^#' /etc/zun/zun.conf.bak >/etc/zun/zun.conf

vi /etc/zun/zun.conf
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
state_path = /var/lib/zun

[compute]
host_shared_with_nova = true

[database]
connection = mysql+pymysql://zun:123456@controller/zun

[keystone_auth]
memcached_servers = controller:11211
www_authenticate_uri = http://controller:5000
project_domain_name = default
project_name = service
user_domain_name = default
password = 123456
username = zun
auth_url = http://controller:5000
auth_type = password
auth_version = v3
auth_protocol = http
service_token_roles_required = True
endpoint_type = internalURL

[keystone_authtoken]
memcached_servers = controller:11211
www_authenticate_uri= http://controller:5000
project_domain_name = default
project_name = service
user_domain_name = default
password = 123456
username = zun
auth_url = http://controller:5000
auth_type = password

[oslo_concurrency]
lock_path = /var/lib/zun/tmp


chown zun:zun /etc/zun/zun.conf
mkdir -p /etc/systemd/system/docker.service.d
vi /etc/systemd/system/docker.service.d/docker.conf

[Service]
ExecStart=
ExecStart=/usr/bin/dockerd --group zun -H tcp://compute:2375 -H unix:///var/run/docker.sock --cluster-store etcd://controller:2379

systemctl daemon-reload
systemctl restart docker

vi /etc/kuryr/kuryr.conf
[DEFAULT]
capability_scope = global
process_external_connectivity = False

systemctl restart kuryr-libnetwork

containerd config default > /etc/containerd/config.toml
#zun组id=ZUN_GROUP_ID,使用下面命令查看
getent group zun | cut -d: -f3
[root@compute zun]# getent group zun | cut -d: -f3
976


vi /etc/containerd/config.toml
[grpc]
#配置gid为zun用户的组 ID
gid = 976

chown zun:zun /etc/containerd/config.toml
systemctl restart containerd
mkdir -p /opt/cni/bin

curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz | tar -C /opt/cni/bin -xzvf - ./loopback

install -o zun -m 0555 -D /usr/local/bin/zun-cni /opt/cni/bin/zun-cni

vi /etc/systemd/system/zun-compute.service
[Unit]
Description = OpenStack Container Service Compute Agent

[Service]
ExecStart = /usr/local/bin/zun-compute
User = zun

[Install]
WantedBy = multi-user.target

vi /etc/systemd/system/zun-cni-daemon.service

[Unit]
Description = OpenStack Container Service CNI daemon

[Service]
ExecStart = /usr/local/bin/zun-cni-daemon
User = zun

[Install]
WantedBy = multi-user.target

systemctl enable zun-compute zun-cni-daemon
systemctl start zun-compute zun-cni-daemon
systemctl status zun-compute zun-cni-daemon

pip3 install python-zunclient
验证
openstack appcontainer service list
[root@controller ~]# openstack appcontainer service list
+----+---------+-------------+-------+----------+-----------------+----------------------------+-------------------+
| Id | Host    | Binary      | State | Disabled | Disabled Reason | Updated At                 | Availability Zone |
+----+---------+-------------+-------+----------+-----------------+----------------------------+-------------------+
|  1 | compute | zun-compute | up    | False    | None            | 2021-06-28T09:25:05.000000 | nova              |
+----+---------+-------------+-------+----------+-----------------+----------------------------+-------------------+
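
确认 zun-compute 为 up 后,可以在控制节点试跑一个测试容器(镜像用 cirros,--net 里的网络 ID 请替换为自己环境中的 neutron 网络,均为示例):

openstack appcontainer run --name test-container --net network=<neutron网络ID> cirros ping -c 4 8.8.8.8
openstack appcontainer list
openstack appcontainer show test-container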

安装 zun-ui,即可通过 dashboard 访问 zun

git clone -b stable/victoria https://git.openstack.org/openstack/zun-ui
ls
cp zun-ui/zun_ui/enabled/* /usr/share/openstack-dashboard/openstack_dashboard/local/enabled/


pip3 install zun-ui
systemctl restart httpd memcached
systemctl status httpd memcached

manila

CREATE DATABASE manila;
授予对`manila`数据库的适当访问权限:
GRANT ALL PRIVILEGES ON manila.* TO 'manila'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON manila.* TO 'manila'@'%' IDENTIFIED BY '123456';


openstack user create --domain default --password-prompt manila
openstack role add --project service --user manila admin
openstack service create --name manila --description "OpenStack Shared File Systems" share
openstack service create --name manilav2 --description "OpenStack Shared File Systems V2" sharev2
openstack endpoint create --region RegionOne share public http://controller:8786/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne share internal http://controller:8786/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne share admin http://controller:8786/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne sharev2 public http://controller:8786/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne sharev2 internal http://controller:8786/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne sharev2 admin http://controller:8786/v2/%\(tenant_id\)s

yum install openstack-manila python3-manilaclient -y
cp /etc/manila/manila.conf /etc/manila/manila.conf.bak
egrep -v '^$|^#' /etc/manila/manila.conf.bak >/etc/manila/manila.conf
vi /etc/manila/manila.conf
[DEFAULT]
auth_strategy = keystone
my_ip = 172.16.20.2
default_share_type = default_share_type
share_name_template = share-%s
rootwrap_config = /etc/manila/rootwrap.conf
api_paste_config = /etc/manila/api-paste.ini
transport_url = rabbit://openstack:123456@controller

[database]
connection = mysql+pymysql://manila:123456@controller/manila

[keystone_authtoken]
memcached_servers = controller:11211
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = manila
password = 123456

[oslo_concurrency]
lock_path = /var/lock/manila

su -s /bin/sh -c "manila-manage db sync" manila


systemctl enable openstack-manila-api.service openstack-manila-scheduler.service
systemctl start openstack-manila-api.service openstack-manila-scheduler.service
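
服务启动后,可以简单确认 manila-scheduler 已经注册(共享节点的 manila-share 配置好后也会出现在这个列表里):

. admin-openrc
manila service-list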
共享节点
yum install openstack-manila-share python3-PyMySQL
vi /etc/manila/manila.conf
[database]
...
connection = mysql+pymysql://manila:123456@controller/manila
 
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
default_share_type = default_share_type
rootwrap_config = /etc/manila/rootwrap.conf
auth_strategy = keystone
my_ip = 172.16.20.3
 
[keystone_authtoken]   
...
memcached_servers = controller:11211
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = manila
password = 123456
 
[oslo_concurrency]
lock_path = /var/lib/manila/tmp

ec2-api

mysql -u root -p
CREATE DATABASE ec2api;
GRANT ALL PRIVILEGES ON ec2api.* TO 'ec2api'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON ec2api.* TO 'ec2api'@'%' IDENTIFIED BY '123456';
exit;

openstack user create --domain default --password-prompt ec2api
openstack role add --project service --user ec2api admin
openstack service create --name ec2-api --description "ec2api" ec2api
openstack endpoint create --region RegionOne ec2api public http://172.16.100.2:8788
openstack endpoint create --region RegionOne ec2api admin http://172.16.100.2:8788
openstack endpoint create --region RegionOne ec2api internal http://172.16.100.2:8788

yum install -y openstack-ec2*

vi /etc/ec2api/ec2api.conf
[DEFAULT]
#openstack network list查看的外网名字
external_network = provider-waiwang
ec2_port = 8788
ec2api_listen_port = 8788
transport_url = rabbit://openstack:123456@172.16.100.2:5672/
keystone_ec2_tokens_url = http://172.16.100.2:5000/identity/v3/ec2tokens
api_paste_config = /etc/ec2api/api-paste.ini
disable_ec2_classic = True
log_file=/var/log/ec2api/ec2api.log
default_flavor=m1.nano
s3_region = RegionOne
s3_url = http://172.16.100.2:8080/v1/AUTH_7fa1c49fb2764440abaf4e936e8a14d7
[database]
connection = mysql+pymysql://ec2api:123456@172.16.100.2/ec2api
[keystone_authtoken]
www_authenticate_uri = http://172.16.100.2:5000/
auth_url = http://172.16.100.2:5000/
memcached_servers = 172.16.100.2:11211
project_domain_name = Default
project_name = service
user_domain_name = Default
password = 123456
username = ec2api
auth_type = password
[metadata]
[oslo_concurrency]
lock_path=/var/lib/ec2api/tmp
[cache]
enabled = True

su -s /bin/sh -c "ec2-api-manage db_sync" ec2api
vi /etc/neutron/metadata_agent.ini
nova_metadata_port = 8789

mkdir /var/lib/ec2api
chown ec2api:ec2api /var/lib/ec2api

systemctl list-unit-files |grep ec2
systemctl restart openstack-ec2-api.service openstack-ec2-api-metadata.service
systemctl enable openstack-ec2-api.service openstack-ec2-api-metadata.service
pip3 install --upgrade pip
pip3 install awscli

aws_access_key_id 和 aws_secret_access_key 可以通过“ openstack ec2 credentials list ”命令获得
openstack ec2 credentials create
openstack ec2 credentials list
+----------------------------------+----------------------------------+----------------------------------+----------------------------------+
| Access                           | Secret                           | Project ID                       | User ID                          |
+----------------------------------+----------------------------------+----------------------------------+----------------------------------+
| 81f529ddb5b94a02a349ea190b9fcc60 | 6ca85e0e26724115a3129e5cc1618ccb | d979cb818be3441c86d78a6a41540e20 | 7b51e271f9d849b2839030ece80cb2bc |
+----------------------------------+----------------------------------+----------------------------------+---------


在主目录中~/.aws/config或通过“ aws configure ”命令为 aws cli 创建配置文件 :
[default]
aws_access_key_id = 81f529ddb5b94a02a349ea190b9fcc60
aws_secret_access_key = 6ca85e0e26724115a3129e5cc1618ccb
region = RegionOne


aws --endpoint-url http://172.16.100.2:8788 ec2 describe-images

aws --endpoint-url http://172.16.100.2:8788 ec2 describe-instances
aws --endpoint-url http://172.16.100.2:8788 ec2 reboot-instances i-c3c1882d
aws --endpoint-url http://172.16.100.2:8788 ec2 stop-instances --instance-ids i-c3c1882d
aws --endpoint-url http://172.16.100.2:8788 ec2 start-instances --instance-ids i-c3c1882d

heat

mysql -uroot -p123456 -e "CREATE DATABASE if not exists heat;"
mysql -uroot -p123456 -e "GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'localhost' IDENTIFIED BY '123456';"
mysql -uroot -p123456 -e "GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'%' IDENTIFIED BY '123456';"
mysql -uroot -p123456 -e "flush privileges;"

openstack user create --domain default --password-prompt heat
openstack role add --project service --user heat admin
openstack service create --name heat   --description "Orchestration" orchestration
openstack service create --name heat-cfn   --description "Orchestration"  cloudformation

openstack endpoint create --region RegionOne orchestration public http://172.16.100.2:8004/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne orchestration internal http://172.16.100.2:8004/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne orchestration admin http://172.16.100.2:8004/v1/%\(tenant_id\)s

openstack endpoint create --region RegionOne cloudformation public http://172.16.100.2:8000/v1
openstack endpoint create --region RegionOne cloudformation internal http://172.16.100.2:8000/v1
openstack endpoint create --region RegionOne cloudformation admin http://172.16.100.2:8000/v1

openstack domain create --description "Stack projects and users" heat
openstack user create --domain heat --password-prompt heat_domain_admin
openstack role add --domain heat --user-domain heat --user heat_domain_admin admin
openstack role create heat_stack_owner
openstack role add --project demo --user demo heat_stack_owner
openstack role create heat_stack_user

yum install openstack-heat-api openstack-heat-api-cfn openstack-heat-engine -y

openstack-config --set /etc/heat/heat.conf database connection mysql+pymysql://heat:123456@172.16.100.2/heat
openstack-config --set /etc/heat/heat.conf DEFAULT transport_url rabbit://openstack:123456@172.16.100.2
openstack-config --set /etc/heat/heat.conf DEFAULT heat_metadata_server_url http://172.16.100.2:8000
openstack-config --set /etc/heat/heat.conf DEFAULT heat_waitcondition_server_url http://172.16.100.2:8000/v1/waitcondition
openstack-config --set /etc/heat/heat.conf DEFAULT stack_domain_admin heat_domain_admin
openstack-config --set /etc/heat/heat.conf DEFAULT stack_domain_admin_password 123456
openstack-config --set /etc/heat/heat.conf DEFAULT stack_user_domain_name heat
openstack-config --set /etc/heat/heat.conf keystone_authtoken www_authenticate_uri http://172.16.100.2:5000
openstack-config --set /etc/heat/heat.conf keystone_authtoken auth_url http://172.16.100.2:5000
openstack-config --set /etc/heat/heat.conf keystone_authtoken memcached_servers 172.16.100.2:11211
openstack-config --set /etc/heat/heat.conf keystone_authtoken auth_type password
openstack-config --set /etc/heat/heat.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/heat/heat.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/heat/heat.conf keystone_authtoken project_name service
openstack-config --set /etc/heat/heat.conf keystone_authtoken username heat
openstack-config --set /etc/heat/heat.conf keystone_authtoken password 123456
openstack-config --set /etc/heat/heat.conf trustee auth_type password
openstack-config --set /etc/heat/heat.conf trustee auth_url http://172.16.100.2:5000
openstack-config --set /etc/heat/heat.conf trustee username heat
openstack-config --set /etc/heat/heat.conf trustee password 123456
openstack-config --set /etc/heat/heat.conf trustee user_domain_name default
openstack-config --set /etc/heat/heat.conf clients_keystone auth_uri http://172.16.100.2:5000


su -s /bin/sh -c "heat-manage db_sync" heat

systemctl enable openstack-heat-api.service openstack-heat-api-cfn.service openstack-heat-engine.service
systemctl start openstack-heat-api.service openstack-heat-api-cfn.service openstack-heat-engine.service

pip3 install heat-dashboard
cp -r /usr/local/lib/python3.6/site-packages/heat_dashboard/enabled/* /usr/share/openstack-dashboard/openstack_dashboard/local/enabled/
systemctl restart httpd.service memcached.service
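
horizon 重启后,可以用一个最小的 HOT 模板验证 heat 能否正常创建/删除资源(模板只创建一个测试网络,名称均为示例):

cat > test-stack.yaml << EOF
heat_template_version: 2018-08-31
description: minimal heat test
resources:
  test_net:
    type: OS::Neutron::Net
    properties:
      name: heat-test-net
EOF

openstack stack create -t test-stack.yaml test-stack
openstack stack list
openstack stack delete test-stack --yes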


openstack API:

https://docs.openstack.org/zh_CN/api-quick-start/api-quick-start.html

kvm镜像制作

yum -y install qemu-kvm  qemu-img   libvirt  bridge-utils

yum install virt-*
systemctl enable libvirtd
systemctl start libvirtd
echo "net.ipv4.ip_forward = 1" >>/etc/sysctl.conf
sysctl -p
#安装web界面
yum install -y virt-* libvirt qemu-img
systemctl enable libvirtd
systemctl start libvirtd
yum install cockpit-machines
yum install cockpit -y
systemctl enable --now cockpit.socket
#通过浏览器访问主机的 9090 端口,登录时使用本机的用户名和密码,如:root 123456

mkdir kvm

qemu-img create -f qcow2  /root/kvm/c8.qcow2 20G

#c88 是随意取的名字,c8.iso 是上传的 CentOS 8 安装镜像,c8.qcow2 是上面创建的磁盘文件

virt-install --name c88 --ram 2048 --os-type linux --os-variant rhel8.0 --arch=x86_64 --network network=default,model=virtio --disk path=/root/kvm/c8.qcow2,format=qcow2 --location /root/kvm/c8.iso --console pty,target_type=serial --graphics vnc,listen=0.0.0.0,port=7788



#用 UltraVNC 等 VNC 客户端连接上面的 7788 端口,完成系统安装配置后 reboot 重启

virsh start c88   #启动这台虚拟机
virsh list
#直接使用/root/kvm/下的c8.qcow2镜像

#ceph支持raw格式,需要转
qemu-img convert -f qcow2 -O raw c8.qcow2 c8.raw
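#(补充示例)转换成 raw 后可直接上传到 glance 作为镜像,镜像名 centos8-raw 仅为示例
openstack image create --disk-format raw --container-format bare --file c8.raw centos8-raw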

#windows
virt-install -n win12-R2 -r 2048 --vcpus=2 --os-type=windows --accelerate --boot cdrom,hd,menu=on -c /root/kvm/cn_windows_server_2012_r2_x64_dvd_2707961.iso --disk path=/root/kvm/win12.qcow2,format=qcow2,bus=virtio,device=disk  --network network=default,model=virtio --disk path=/root/kvm/virtio-win-0.1.190.iso,device=cdrom --disk path=/root/kvm/virtio-win-0.1.190_x86.vfd,device=cdrom --vnc --vncport=5997 --vnclisten=0.0.0.0 --force --autostart
#进系统需要加载网卡驱动


# 其中image参数指代的是Windows ISO镜像的ID,而block-device参数的id则指代的是virtio镜像的ID

nova boot --image b4ba82ca-beaa-4266-81a4-9ff23ec9d524 --flavor 2 --nic net-id=1af38e89-0d44-4508-b5af-c77ea173667d --block-device source=image,dest=volume,id=acddaec0-a2db-4cae-ab05-327443cf15fe,type=cdrom,bus=ide,size=1 mytest



openstack镜像导出

[root@compute ~]# glance image-list
+--------------------------------------+--------+
| ID                                   | Name   |
+--------------------------------------+--------+
| 43d03ed2-5ee7-4ae3-aad7-f707a6611128 |  aa    |
| 85b6d7d7-a96c-4664-bc87-355f49a28477 | centos |
| 04eda535-0adc-473a-a2d6-a14ab4877b33 | cirros |
+--------------------------------------+--------+
[root@compute ~]# glance image-download --file /root/aa.img 43d03ed2-5ee7-4ae3-aad7-f707a6611128

或者
关机,找到实例ID
[root@compute ~]# qemu-img convert -c -O qcow2 /var/lib/nova/instances/f1c19fd8-2b56-4181-b04f-afc3def87093/disk caesar.qcow2 -p
(100.00/100%)

openstack和vmware整合

对技术实现原理图进行简要说明:

(1)Nova-Compute中含有VMware vCenter Driver(vmwareapi.VMwareVCDriver),可以与管理着ESXi Cluster的vCenter Server通讯,以此管理ESXi Cluster中的计算资源。

(2)在OpenStack的Grizzly版本中,一个Driver只支持单个ESXi Cluster。从Havana版本开始,一个Driver能够支持管理多个ESXi Cluster;

   注:Nova-Compute服务的实例可以在多个独立虚拟机上运行,也可以作为同一虚拟机上的不同进程去运行;

(3)对由vCenter Server管理的一个个ESXi Cluster,VMware vCenter Driver会将之向Nova-Scheduler呈现为一个个单独的“主机”实体 (即:Nova-Scheduler会将一个ESXi Cluster看作是一个OpenStack的Compute Node),“主机”实体的资源由ESXi Cluster之内的ESXi物理主机节点共同组成;

(4)Nova-Scheduler会首先选择一个最为合适的ESXi Cluster ,然后,VMware vCenter Driver与vCenter Server APIs进行交互,接着,由vCenter的DRS(Distributed Resource Scheduling)在这个ESXi Cluster中选择合适的ESXi物理主机, 并在其上创建和终止虚拟机实例;

(5) 每个ESXi cluster都需要有一个datastore来进行配置和使用。

通过对Compute Node的Nova配置文件nova.conf进行配置,启用Nova vCenter Driver,将OpenStack Compute Node与vCenter相连,并且指向特定的ESXi cluster,具体的配置方法如下:

[DEFAULT]
compute_driver = vmwareapi.VMwareVCDriver

[vmware]
host_ip= <vCenter主机IP>
host_username = <vCenter用户名>
host_password = <vCenter密码>

cluster_name = <ESXi cluster #1的名称>
cluster_name = <ESXi Cluster #n的名称>

datastore_regex = <指定可用的datastore>

在 Nova 计算重启后配置 OpenStack 实例/虚拟机以自动启动

#此选项指定计算节点重启后,是否自动拉起重启前正在运行的客户机。

vi /etc/nova/nova.conf

[DEFAULT]
resume_guests_state_on_host_boot = True

如何使用 cloud-config 配置 OpenStack 实例

(采取: https://docs.ukcloud.com/articles/openstack/ostack-how-cloud-config.html)

概述

当您在 OpenStack 中启动实例时,您可以将配置作为 cloud-config(有时称为 user-data)传递给实例。这使您能够在引导时快速轻松地进行配置更改。

用例

以下列表提供了一些示例,说明如何使用 cloud-config 配置实例:

  • 设置默认语言环境
  • 设置实例主机名
  • 生成实例 SSH 私钥
  • 向用户添加 SSH 密钥,.ssh/authorized_keys以便他们可以登录
  • 设置临时挂载点
  • 配置网络设备
  • 在启动时运行配置脚本

例子

您可以通过命令行或 OpenStack Horizon GUI 部署 cloud-config。在本指南中,我们使用 CLI,但您可以通过 GUI 使用相同的脚本,方法是将其粘贴到配置选项卡中。

例如,让我们启动一个实例并在启动时运行一些任意命令。

  1. 创建一个名为 user-data.txt 的文件。

  2. 将以下内容添加到新文件中。

    #cloud-config
    runcmd:
     - mkdir -pv /root/cloud-config-test
     - touch /root/cloud-config-test/boottest
     - echo "boot instance test" >> /root/cloud-config-test/boottest
    
    笔记

    您必须在文件顶部包含 #cloud-config 这一行。

  3. 使用 CLI 引导实例,添加--user-data user-data.txt到引导命令的末尾。

  4. 实例启动后,登录实例,切换到 root 并查看目录。您将看到新文件,它将包含boot instance test.

这是一个非常简单的示例,说明您可以使用 cloud-config 做什么,但您可以从这个简单示例中看到,您可以轻松开始构建相当复杂的设置脚本。
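
下面给出一个通过 CLI 传入 user-data 的完整启动命令示意(镜像、规格、网络名均为假设值,请按自己环境替换):

openstack server create --image cirros --flavor m1.nano --network test_net --user-data user-data.txt userdata-test
openstack server list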

下一步

您可以在以下位置找到 cloud-config 的文档:

https://cloudinit.readthedocs.io/en/latest/topics/examples.html

卷信息清理(无法删除卷时)

curl -g -i -X POST  http://172.16.100.2:8776/v3/7fa1c49fb2764440abaf4e936e8a14d7/volumes/4bb78f2a-e8ed-4e1c-91d0-01462f996f2d/action -H "User-Agent: python-cinderclient" -H "Content-Type: application/json" -H "Accept: application/json" -H "X-Auth-Token: $token" -d '{"os-detach": {"attachment_id": "9a3ee7c0-9f5c-4b9b-83fa-b31b3c792fd8"}}'

RadHat注册系统并启用存储库

subscription-manager register --username=1550367108 --password=Cyx.17723942934 --auto-attach
subscription-manager list --available
subscription-manager attach --pool="8a85f99c7d76f2fd017d78ef6bf53d29"
subscription-manager repos --enable=rhel-8-for-x86_64-appstream-rpms   --enable=rhel-8-for-x86_64-supplementary-rpms --enable=codeready-builder-for-rhel-8-x86_64-rpms

#字符提示
vi /etc/profile
export LC_ALL=en_US.UTF-8
