SequoiaDB v3.2.3 and later supports one-click cluster deployment through SAC.
This deployment uses CentOS 7.x, with 2 GB or more of memory allocated to each virtual machine.
1. Set the hostnames
# Set the three hostnames to sdb01, sdb02 and sdb03 in order, and map them in /etc/hosts (a sketch follows)
vi /etc/hosts
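A minimal sketch of the naming and mapping, assuming hypothetical 192.168.1.x addresses (replace them with the real IPs of the three machines):
# on each machine, set its own hostname, e.g. on the first one:
hostnamectl set-hostname sdb01
# append the mapping to /etc/hosts on all three machines (IPs are placeholders)
192.168.1.11 sdb01
192.168.1.12 sdb02
192.168.1.13 sdb03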
2. Disable the firewall
# Check firewall status
systemctl status firewalld
# Stop the firewall and disable it permanently
systemctl stop firewalld
systemctl disable firewalld
# Confirm it is disabled
systemctl status firewalld
3. Adjust system parameters (optional)
######### Adjust ulimit #########
vim /etc/security/limits.conf
## for sequoiadb
sdbadmin  -  core    0
sdbadmin  -  data    unlimited
sdbadmin  -  fsize   unlimited
sdbadmin  -  rss     unlimited
sdbadmin  -  as      unlimited
sdbadmin  -  stack   1024
sdbadmin  -  nproc   unlimited
sdbadmin  -  nofile  300000
## end for sequoiadb
# Check /etc/security/limits.conf
cat /etc/security/limits.conf
vim /etc/security/limits.d/90-nproc.conf
# Change "* soft nproc 1024" to "# * soft nproc 1024"
# Add:
sdbadmin  -  nproc  unlimited
# Check /etc/security/limits.d/90-nproc.conf
cat /etc/security/limits.d/90-nproc.conf
4. Tune kernel parameters (optional)
######## Tune kernel parameters ########
vim /etc/sysctl.conf
## begin for sequoiadb
vm.swappiness = 0
vm.dirty_ratio = 100
vm.dirty_background_ratio = 80
vm.dirty_expire_centisecs = 3000
vm.dirty_writeback_centisecs = 500
vm.vfs_cache_pressure = 200
vm.min_free_kbytes = 10240
vm.overcommit_memory = 2
vm.overcommit_ratio = 85
fs.file-max = 10000000
## end for sequoiadb
# Apply the configuration:
/sbin/sysctl -p
# Verify that the settings took effect
cat /proc/sys/vm/swappiness
cat /proc/sys/vm/dirty_ratio
cat /proc/sys/vm/dirty_background_ratio
cat /proc/sys/vm/dirty_expire_centisecs
cat /proc/sys/vm/vfs_cache_pressure
cat /proc/sys/vm/min_free_kbytes
cat /proc/sys/vm/overcommit_memory
cat /proc/sys/vm/overcommit_ratio
5. Disable SELinux
# Check SELinux status
[root@localhost ~]# getenforce
Enforcing
# Disable it permanently:
[root@localhost ~]# vim /etc/sysconfig/selinux
# Change SELINUX=enforcing to SELINUX=disabled
# Then reboot the system: init 6
6. Disable NUMA and transparent_hugepage
# Edit the configuration file and make sure GRUB_CMDLINE_LINUX contains numa=off and transparent_hugepage=never, e.g.:
# GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet numa=off transparent_hugepage=never"
vi /etc/default/grub
# Regenerate the grub configuration (on BIOS systems: grub2-mkconfig -o /boot/grub2/grub.cfg), then restart the server
reboot
# Check NUMA status
dmesg | grep -i numa
# Expected output
[ 0.000000] Command line: BOOT_IMAGE=/vmlinuz-3.10.0-327.el7.x86_64 root=/dev/mapper/centos-root ro crashkernel=auto rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet numa=off
[ 0.000000] NUMA turned off
[ 0.000000] Kernel command line: BOOT_IMAGE=/vmlinuz-3.10.0-327.el7.x86_64 root=/dev/mapper/centos-root ro crashkernel=auto rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet numa=off
1. Upload the installation package and extract it
2. Run the SequoiaDB installer (a sketch follows this list)
3. Install the OM service only on sdb01; do not install it on the other two machines
4. Accept the defaults for the remaining prompts to complete the installation
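A sketch of steps 1 and 2, assuming a 3.2.3 x86_64 package with the hypothetical file names below (use the names of the package you actually downloaded):
# hypothetical archive and installer names
tar -zxvf sequoiadb-3.2.3-linux_x86_64.tar.gz
cd sequoiadb-3.2.3
# run the installer as root and follow the prompts (OM service only on sdb01)
./sequoiadb-3.2.3-linux_x86_64-installer.run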
1. Log in to sdb01, switch to the sdbadmin user, and check whether sdbcm is running
# Check sdbcm status
service sdbcm status
# If it is stopped, start it
service sdbcm start
2. Enter the sdb shell and create a temporary coordination node (delete it after use)
sdb
> oma = new Oma("localhost", 11790)
# Create the temporary coordination node
> oma.createCoord(18800, "/opt/sequoiadb/database/coord/18800")
# Start the temporary coordination node
> oma.startNode(18800)
3. Configure and start the catalog nodes
# Connect to the temporary coordination node
> var db = new Sdb("localhost", 18800)
# Create the catalog node group (with its first node on sdb01)
> db.createCataRG("sdb01", 11800, "/opt/sequoiadb/database/cata/11800")
# Create the other two catalog nodes
> var cataRG = db.getRG("SYSCatalogGroup")
> var node1 = cataRG.createNode("sdb02", 11800, "/opt/sequoiadb/database/cata/11800")
> var node2 = cataRG.createNode("sdb03", 11800, "/opt/sequoiadb/database/cata/11800")
> node1.start()
> node2.start()
4. Configure and start the data node group
# Create the data group
> db.createRG("datagroup01")
> dg = db.getRG("datagroup01")
# Add nodes
> dg.createNode("sdb01", 11900, "/opt/sequoiadb/database/data/11900")
> dg.createNode("sdb02", 11900, "/opt/sequoiadb/database/data/11900")
> dg.createNode("sdb03", 11900, "/opt/sequoiadb/database/data/11900")
# Start the data nodes
> dg.start()
5. Configure and start the coordination node group
# Create the coordination node group
> var rg = db.createCoordRG()
> rg.createNode("sdb01", 11810, "/opt/sequoiadb/database/coord/11810")
> rg.createNode("sdb02", 11810, "/opt/sequoiadb/database/coord/11810")
> rg.createNode("sdb03", 11810, "/opt/sequoiadb/database/coord/11810")
# Start the coordination node group
> rg.start()
6. After the cluster is created, stop the temporary coordination node and remove it
> oma.removeCoord(18800)
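If removeCoord complains that the node is still running, stopping it first should help; a hedged sketch assuming Oma also provides stopNode (the counterpart of the startNode call used earlier):
> oma.stopNode(18800)
> oma.removeCoord(18800)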
7. Modify SequoiaDB's limits.conf file
# Log in as the sdbadmin user
vim /opt/sequoiadb/conf/limits.conf
# Change open_files=60000 to open_files=300000
# Restart the sdb cluster nodes
sdbstop
sdbstart
1. Run the MySQL installer included in the SequoiaDB installation tar (sketched below); accepting the defaults completes the installation.
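A sketch of step 1, assuming the MySQL installer inside the tar is named as below (the actual name depends on your version):
# hypothetical installer name; run as root and accept the defaults
./sequoiasql-mysql-3.2.3-linux_x86_64-installer.run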
2. Configure MySQL
1) Add a MySQL instance
# Go to the MySQL installation directory
cd /opt/sequoiasql/mysql/
# Run the script to add an instance; -p can specify the port number, default 3306
bin/sdb_sql_ctl addinst sdb01inst -D database/3306
# Check the instance configuration and confirm the SequoiaDB connection information is correct
vim database/3306/auto.cnf
# Restart the instance after changing the configuration
bin/sdb_sql_ctl restart sdb01inst
# Confirm the instance
bin/sdb_sql_ctl list
2) Increase the maximum number of connections of the MySQL instance
# As the sdbadmin user, modify the MySQL maximum connections:
vi /opt/sequoiasql/mysql/database/3306/auto.cnf
max_connections=3000
# As the root user, add two parameters to the service configuration file:
vi /usr/lib/systemd/system/sequoiasql-mysql.service
LimitNOFILE=infinity
LimitMEMLOCK=infinity
# As the root user, reload systemd and restart the sequoiasql-mysql service:
systemctl daemon-reload
systemctl restart sequoiasql-mysql
3. Install the audit log plugin
Synchronizing DDL between MySQL instances requires the audit log to be configured.
1) Create the sdbadmin user in MySQL
> CREATE USER 'sdbadmin'@'%' IDENTIFIED BY 'sdbadmin';
> GRANT all on *.* TO 'sdbadmin'@'%' with grant option;
2) Copy the audit plugin to the lib/plugin directory
cp /opt/sequoiasql/mysql/tools/lib/server_audit.so /opt/sequoiasql/mysql/lib/plugin
3) Edit the MySQL instance configuration file /opt/sequoiasql/mysql/database/3306/auto.cnf and add the following under the [mysqld] section
The auditlog log directory must be created manually (see the command after the settings below).
# Load the audit plugin
plugin-load=server_audit=server_audit.so
# Events to audit; recording only the DCL and DDL operations that need to be synchronized is recommended
server_audit_events=CONNECT,QUERY_DDL,QUERY_DCL
# Enable auditing
server_audit_logging=ON
# Audit log file path
server_audit_file_path=/opt/sequoiasql/mysql/data/auditlog/server_audit.log
# Force rotation of the audit log file
server_audit_file_rotate_now=OFF
# Audit log file size of 10 MB; files exceeding this size are rotated (value in bytes)
server_audit_file_rotate_size=10485760
# Number of audit log files to keep; the oldest is discarded once exceeded
server_audit_file_rotations=999
# Output type: file
server_audit_output_type=file
# Limit each query log line to 100 KB; increase this if tables are complex and statements are long
server_audit_query_log_limit=102400
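The auditlog directory referenced by server_audit_file_path above does not exist by default; as the sdbadmin user:
# create the audit log directory used by the settings above
mkdir -p /opt/sequoiasql/mysql/data/auditlog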
4) Edit the config file and set the IPs and ports of the MySQL instances to be synchronized
5) Start the synchronization script
python /opt/sequoiasql/mysql/tools/metaSync/meta_sync.py &
6) Set up a scheduled task
crontab -e
# Run once every minute
*/1 * * * * python /opt/sequoiasql/mysql/tools/metaSync/meta_sync.py &
7) Restart the MySQL instance
sdb_sql_ctl restart sdb01inst
8) Repeat the same steps on the other two machines
9) Create a database to verify synchronization (sketched below)
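A quick verification sketch, assuming the bundled mysql client under /opt/sequoiasql/mysql/bin and a hypothetical database name test01: create it on one instance and confirm it appears on the others after the sync script runs.
# on sdb01: create a test database
/opt/sequoiasql/mysql/bin/mysql -h sdb01 -P 3306 -u sdbadmin -psdbadmin -e "CREATE DATABASE test01;"
# on sdb02 and sdb03: test01 should show up once the metadata sync has run
/opt/sequoiasql/mysql/bin/mysql -h sdb02 -P 3306 -u sdbadmin -psdbadmin -e "SHOW DATABASES;"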
1. Run the PostgreSQL installer contained in the SequoiaDB archive.
2. Accept the defaults to complete the installation.
3. Create an instance and configure PostgreSQL
# Create a PostgreSQL instance
# 1. Go to the installation directory
cd /opt/sequoiasql/postgresql/
# 2. Create the instance
bin/sdb_sql_ctl addinst sdb01pginst -D database/5432
# 3. Start the instance
bin/sdb_sql_ctl start sdb01pginst
4. Modify the following system parameter; otherwise the database may later fail with "server closed the connection unexpectedly This probably means the server…"
vim /etc/systemd/logind.conf
# Change RemoveIPC=yes to RemoveIPC=no
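For the change to take effect without a full reboot, restarting the logind service should suffice (a hedged note; rebooting the machine also works):
# as root, re-read logind.conf
systemctl restart systemd-logind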
5. Edit the configuration parameter file postgresql.conf of the PostgreSQL instance
cd /opt/sequoiasql/postgresql/database/5432/
sed -i "s/#log_destination = 'stderr'/log_destination = 'stderr'/g" postgresql.conf
sed -i "s/#logging_collector = off/logging_collector = on/g" postgresql.conf
sed -i "s/#log_directory = 'pg_log'/log_directory = 'pg_log'/g" postgresql.conf
sed -i "s/#log_connections = off/log_connections = on/g" postgresql.conf
sed -i "s/#log_disconnections = off/log_disconnections = on/g" postgresql.conf
sed -i "s/#log_line_prefix = ''/log_line_prefix = '%m %p %r %x'/g" postgresql.conf
sed -i "s/#log_directory = pg_log/log_directory = pg_log/g" postgresql.conf
sed -i "s/#log_filename = /log_filename = /g" postgresql.conf
sed -i "s/#exit_on_error = off/exit_on_error = on/g" postgresql.conf
sed -i "s/#listen_addresses = 'localhost'/listen_addresses = '*'/g" postgresql.conf
sed -i "s/#standard_conforming_strings = on/standard_conforming_strings = off/g" postgresql.conf
sed -i "s/#log_min_duration_statement = -1/log_min_duration_statement = 100/g" postgresql.conf
6. Enter the pg shell and create a foreign table
# Create a database and enter the pg shell
createdb sdbadmin01
psql sdbadmin01
# Load the sdb driver
sdbadmin01=# create extension sdb_fdw;
# Configure the connection parameters to sdb
sdbadmin01=# create server sdb_server foreign data wrapper sdb_fdw options(address '127.0.0.1', service '11810', user 'sdbUserName', password 'sdbPassword', preferedinstance 'A', transaction 'off');
# Map a collection space and collection in sdb (they must be created in sdb beforehand; see the sketch after this step)
sdbadmin01=# create foreign table test(id int, name varchar) server sdb_server options(collectionspace 'sdbadmin01', collection 'test', decimal 'on');
# Insert a row, then check in the sdb shell whether the corresponding collection contains it
sdbadmin01=# insert into test values(234, 'yangss');
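The collection space and collection mapped above must already exist on the SequoiaDB side; a minimal sketch in the sdb shell, going through the coordination node on port 11810:
sdb
> var db = new Sdb("localhost", 11810)
> db.createCS("sdbadmin01")
> db.sdbadmin01.createCL("test")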
1. Environment requirements and preparation
Spark requires a JDK; configure the JDK 1.8 environment variables on all three virtual machines, as sketched below.
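A sketch of the JDK environment variables, assuming the JDK is unpacked to /opt/jdk1.8.0_201 (the same path used later in spark-env.sh); append to /etc/profile on each machine:
# /etc/profile additions (the JDK path is an assumption)
export JAVA_HOME=/opt/jdk1.8.0_201
export PATH=$JAVA_HOME/bin:$PATH
# apply and verify
source /etc/profile
java -version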
Set up passwordless SSH
# Generate the key pair (on all three machines)
ssh-keygen -t rsa
# Copy the public key to sdb01 (on all three machines)
ssh-copy-id sdb01
# Distribute sdb01's authorized_keys file to the other machines
scp .ssh/authorized_keys sdb02:$PWD/.ssh
scp .ssh/authorized_keys sdb03:$PWD/.ssh
2. Prepare the Spark package and edit its configuration files (same steps on all three machines)
# Go to the conf directory under the Spark installation directory
cd /opt/spark-2.1.3-bin-hadoop2.7/conf
# Rename the template file
mv spark-env.sh.template spark-env.sh
# Edit it, adding the following parameters
cat > spark-env.sh <<EOF
#!/usr/bin/env bash
# recommended, though optional
export JAVA_HOME=/opt/jdk1.8.0_201
# master node
export SPARK_MASTER_HOST=sdb01
# master management port
export SPARK_MASTER_PORT=7077
EOF
# Edit the slaves file so that it lists the worker hostnames sdb01, sdb02, sdb03
mv slaves.template slaves
vim slaves
# A Spark Worker will be started on each of the machines listed below.
sdb01
sdb02
sdb03
3. Start the cluster (on sdb01)
/opt/spark-2.1.3-bin-hadoop2.7/sbin/start-all.sh
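A quick check that the cluster came up, assuming the default ports (jps ships with the JDK configured earlier):
# on sdb01 a Master and a Worker process should be listed; on sdb02/sdb03 a Worker
jps
# the master Web UI is available at http://sdb01:8080 by default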