赞
踩
OS | 规格 | 主机名 | IP | private IP | vip | scanip |
---|---|---|---|---|---|---|
Centos 7.6 | 2C4G | testosa | 192.168.1.81 | 10.10.100.81 | 192.168.1.84 | 192.168.1.80 |
Centos 7.6 | 2C4G | testosb | 192.168.1.82 | 10.10.100.82 | 192.168.1.85 | 192.168.1.80 |
Centos 7.6 | 2C4G | testosc | 192.168.1.83 | 10.10.100.83 | 192.168.1.86 | 192.168.1.80 |
使用VMware Workstation
注意:VMware Workstation的虚拟网卡不要勾选dhcp,我们手动配置第二块网卡的IP地址
网络配置:
网络配置
[root@testosa ~]# ip a|grep ens 2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 inet 192.168.1.81/24 brd 192.168.1.255 scope global noprefixroute ens33 3: ens35: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 inet 10.10.100.81/24 brd 10.10.100.255 scope global noprefixroute ens35 [root@testosa ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33 TYPE="Ethernet" PROXY_METHOD="none" BROWSER_ONLY="no" BOOTPROTO="none" DEFROUTE="yes" IPV4_FAILURE_FATAL="no" IPV6INIT="yes" IPV6_AUTOCONF="yes" IPV6_DEFROUTE="yes" IPV6_FAILURE_FATAL="no" IPV6_ADDR_GEN_MODE="stable-privacy" NAME="ens33" DEVICE="ens33" ONBOOT="yes" IPADDR="192.168.1.81" PREFIX="24" GATEWAY="192.168.1.1" IPV6_PRIVACY="no" [root@testosa ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens35 TYPE="Ethernet" PROXY_METHOD="none" BROWSER_ONLY="no" BOOTPROTO="none" DEFROUTE="yes" IPV4_FAILURE_FATAL="no" IPV6INIT="yes" IPV6_AUTOCONF="yes" IPV6_DEFROUTE="yes" IPV6_FAILURE_FATAL="no" IPV6_ADDR_GEN_MODE="stable-privacy" NAME="ens35" DEVICE="ens35" ONBOOT="yes" IPADDR="10.10.100.81" PREFIX="24" GATEWAY="10.10.100.1" IPV6_PRIVACY="no" [root@testosa ~]#
安装好一台主机,完全克隆两次,得到其他主机
可参考:https://blog.csdn.net/myneth/article/details/132528842
# Append cluster name resolution to /etc/hosts (run on every node).
# Entries: public IPs, private interconnect (aprv/bprv/cprv), per-node
# VIPs (avip/bvip/cvip) and the single SCAN address — must match the
# host/IP plan in the table above.
cat >> /etc/hosts << EOF
192.168.1.81 testosa
192.168.1.82 testosb
192.168.1.83 testosc
10.10.100.81 aprv
10.10.100.82 bprv
10.10.100.83 cprv
192.168.1.84 avip
192.168.1.85 bvip
192.168.1.86 cvip
192.168.1.80 scanip
EOF
# Force an English locale for root so installer/runtime messages are not
# garbled in terminals without Chinese fonts (run on every node).
echo "export LANG=en_US" >> ~/.bash_profile
source ~/.bash_profile
/usr/sbin/groupadd -g 50001 oinstall /usr/sbin/groupadd -g 50002 dba /usr/sbin/groupadd -g 50003 oper /usr/sbin/groupadd -g 50004 asmadmin /usr/sbin/groupadd -g 50005 asmoper /usr/sbin/groupadd -g 50006 asmdba /usr/sbin/useradd -u 60001 -g oinstall -G dba,asmdba,oper oracle /usr/sbin/useradd -u 60002 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid echo grid | passwd --stdin grid echo oracle | passwd --stdin oracle #创建文件系统 pvcreate /dev/sdb vgcreate oravg /dev/sdb lvcreate -n oralv -L 190G oravg mkfs.ext4 /dev/oravg/oralv pvs vgs/vgdisplay lvs/lvdisplay mkdir -p /oracle cat >> /etc/fstab << "EOF" /dev/oravg/oralv /oracle ext4 defaults 0 0 EOF mount -a mkdir -p /oracle/app/grid mkdir -p /oracle/app/11.2.0/grid chown -R grid:oinstall /oracle mkdir -p /oracle/app/oraInventory chown -R grid:oinstall /oracle/app/oraInventory mkdir -p /oracle/app/oracle chown -R oracle:oinstall /oracle/app/oracle chmod -R 775 /oracle
#配置本地yum源 mount /dev/cdrom /mnt cd /etc/yum.repos.d mkdir bk mv *.repo bk/ cat > /etc/yum.repos.d/Centos7.repo << "EOF" [local] name=Centos7 baseurl=file:///mnt gpgcheck=0 enabled=1 EOF cat /etc/yum.repos.d/Centos7.repo #安装所需的软件 yum -y install autoconf yum -y install automake yum -y install binutils yum -y install binutils-devel yum -y install bison yum -y install cpp yum -y install dos2unix yum -y install gcc yum -y install gcc-c++ yum -y install lrzsz yum -y install python-devel yum -y install compat-db* yum -y install compat-gcc-34 yum -y install compat-gcc-34-c++ yum -y install compat-libcap1 yum -y install compat-libstdc++-33 yum -y install compat-libstdc++-33.i686 yum -y install glibc-* yum -y install glibc-*.i686 yum -y install libXpm-*.i686 yum -y install libXp.so.6 yum -y install libXt.so.6 yum -y install libXtst.so.6 yum -y install libXext yum -y install libXext.i686 yum -y install libXtst yum -y install libXtst.i686 yum -y install libX11 yum -y install libX11.i686 yum -y install libXau yum -y install libXau.i686 yum -y install libxcb yum -y install libxcb.i686 yum -y install libXi yum -y install libXi.i686 yum -y install libXtst yum -y install libstdc++-docs yum -y install libgcc_s.so.1 yum -y install libstdc++.i686 yum -y install libstdc++-devel yum -y install libstdc++-devel.i686 yum -y install libaio yum -y install libaio.i686 yum -y install libaio-devel yum -y install libaio-devel.i686 yum -y install libXp yum -y install libaio-devel yum -y install numactl yum -y install numactl-devel yum -y install make yum -y install sysstat yum -y install unixODBC yum -y install unixODBC-devel yum -y install elfutils-libelf-devel-0.97 yum -y install elfutils-libelf-devel yum -y install redhat-lsb-core yum -y install unzip yum -y install *vnc* # 安装Linux图像界面 yum groupinstall -y "X Window System" yum groupinstall -y "GNOME Desktop" "Graphical Administration Tools" #检查包的安装情况 rpm -q --qf '%{NAME}-%{VERSION}-%{RELEASE} (%{ARCH})\n'
vi /etc/security/limits.conf #ORACLE SETTING grid soft nproc 16384 grid hard nproc 16384 grid soft nofile 65536 grid hard nofile 65536 grid soft stack 32768 grid hard stack 32768 oracle soft nproc 16384 oracle hard nproc 16384 oracle soft nofile 65536 oracle hard nofile 65536 oracle soft stack 32768 oracle hard stack 32768 oracle hard memlock 2000000 oracle soft memlock 2000000 ulimit -a # nproc 操作系统对用户创建进程数的限制 # nofile 文件描述符 一个文件同时打开的会话数 也就是一个进程能够打开多少个文件 # memlock 内存锁,给oracle用户使用的最大内存,单位是KB 当前环境的物理内存为4G(grid1g,操作系统1g,我们给oracle留2g),memlock<物理内存
# Raise the default per-user process cap. Deliberately overwrites the
# drop-in file with a single rule for all users (CentOS 7's default
# *-nproc.conf would otherwise cap non-root users at 4096).
echo "* - nproc 16384" > /etc/security/limits.d/90-nproc.conf
# Ensure PAM enforces limits.conf at login.
echo "session required pam_limits.so" >> /etc/pam.d/login
cat /etc/pam.d/login
vi /etc/sysctl.conf #ORACLE SETTING fs.aio-max-nr = 1048576 fs.file-max = 6815744 kernel.sem = 250 32000 100 128 net.ipv4.ip_local_port_range = 9000 65500 net.core.rmem_default = 262144 net.core.rmem_max = 4194304 net.core.wmem_default = 262144 net.core.wmem_max = 1048586 kernel.panic_on_oops = 1 vm.nr_hugepages = 868 kernel.shmmax = 1610612736 kernel.shmall = 393216 kernel.shmmni = 4096 sysctl -p
参数说明
--kernel.panic_on_oops = 1 程序出问题,是否继续 --vm.nr_hugepages = 1000 大内存页,物理内存超过8g,必设 经验值:sga_max_size/2m+(100~500)=1536/2m+100=868 >sga_max_size --kernel.shmmax = 1610612736 定义单个共享内存段的最大值,一定要存放下整个SGA,>SGA SGA+PGA <物理内存的80% SGA_max<物理内存的80%的80% PGA_max<物理内存的80%的20% kernel.shmall = 393216 --控制共享内存的页数 =kernel.shmmax/PAGESIZE getconf PAGESIZE --获取内存页大小 4096 kernel.shmmni = 4096 --共享内存段的数量,一个实例就是一个内存共享段
cat /proc/meminfo cat /sys/kernel/mm/transparent_hugepage/defrag [always] madvise never cat /sys/kernel/mm/transparent_hugepage/enabled [always] madvise never vi /etc/rc.d/rc.local if test -f /sys/kernel/mm/transparent_hugepage/enabled; then echo never > /sys/kernel/mm/transparent_hugepage/enabled fi if test -f /sys/kernel/mm/transparent_hugepage/defrag; then echo never > /sys/kernel/mm/transparent_hugepage/defrag fi chmod +x /etc/rc.d/rc.local
numactl --hardware
vim /etc/default/grub
GRUB_CMDLINE_LINUX="crashkernel=auto rhgb quiet numa=off"
grub2-mkconfig -o /boot/grub2/grub.cfg
#vi /boot/grub/grub.conf
#kernel /boot/vmlinuz-2.6.18-128.1.16.0.1.el5 root=LABEL=DBSYS ro bootarea=dbsys rhgb quiet console=ttyS0,115200n8 console=tty1 crashkernel=128M@16M numa=off
systemctl set-default multi-user.target
[root@testosa ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 93G 1.9G 91G 2% / devtmpfs 1.9G 0 1.9G 0% /dev tmpfs 1.9G 0 1.9G 0% /dev/shm #/dev/shm 默认是操作系统物理内存的一半,我们设置大一点 echo "tmpfs /dev/shm tmpfs defaults,size=3072m 0 0" >>/etc/fstab mount -o remount /dev/shm [root@testosa ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/sda2 93G 2.3G 91G 3% / devtmpfs 1.9G 0 1.9G 0% /dev tmpfs 3.0G 0 3.0G 0% /dev/shm
#1. Disable SELinux (fully effective after reboot).
# Edit the existing line in place rather than truncating the file with ">",
# so the rest of /etc/selinux/config (e.g. SELINUXTYPE) is preserved.
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
setenforce 0   # switch to permissive immediately for the running system

#2. Stop and disable the firewall and NTP. NTP is removed so Oracle's
# Cluster Time Synchronization Service (CTSS) runs in active mode.
systemctl stop firewalld
systemctl disable firewalld
systemctl stop ntpd
systemctl disable ntpd

# All three hosts must have (nearly) identical clocks; set manually, e.g.:
date -s 'Sat Aug 26 23:18:15 CST 2023'
# Shell profiles for the grid and oracle users (run on every node).
# Only ORACLE_SID differs per node:
#   grid:   +ASM1 / +ASM2 / +ASM3
#   oracle: rac_db1 / rac_db2 / rac_db3
# The quoted "EOF" delimiter keeps $variables and `backticks` literal so
# they are evaluated at login, not while writing the file.
# NOTE: the original node-2 oracle profile contained "f i" instead of "fi",
# which made the profile a syntax error; fixed here.

su - grid
cat >> ~/.bash_profile << "EOF"
PS1="[`whoami`@`hostname`:"'$PWD]$'
export PS1
umask 022
#alias sqlplus="rlwrap sqlplus"
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
ORACLE_SID=+ASM1; export ORACLE_SID   # use +ASM2 on node 2, +ASM3 on node 3
ORACLE_TERM=xterm; export ORACLE_TERM
ORACLE_BASE=/oracle/app/grid; export ORACLE_BASE
ORACLE_HOME=/oracle/app/11.2.0/grid; export ORACLE_HOME
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
PATH=.:$PATH:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
    if [ $SHELL = "/bin/ksh" ]; then
        ulimit -p 16384
        ulimit -n 65536
    else
        ulimit -u 16384 -n 65536
    fi
    umask 022
fi
EOF

su - oracle
cat >> ~/.bash_profile << "EOF"
PS1="[`whoami`@`hostname`:"'$PWD]$'
#alias sqlplus="rlwrap sqlplus"
#alias rman="rlwrap rman"
export PS1
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
export ORACLE_UNQNAME=rac_db
ORACLE_BASE=/oracle/app/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1; export ORACLE_HOME
ORACLE_SID=rac_db1; export ORACLE_SID   # use rac_db2 on node 2, rac_db3 on node 3
ORACLE_TERM=xterm; export ORACLE_TERM
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
NLS_LANG=AMERICAN_AMERICA.ZHS16GBK; export NLS_LANG
PATH=.:$PATH:$HOME/bin:$ORACLE_BASE/product/11.2.0/db_1/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
    if [ $SHELL = "/bin/ksh" ]; then
        ulimit -p 16384
        ulimit -n 65536
    else
        ulimit -u 16384 -n 65536
    fi
    umask 022
fi
EOF
#下载脚本 wget https://gitcode.net/myneth/tools/-/raw/master/tool/ssh.sh chmod +x ssh.sh #执行互信 ./ssh.sh -user grid -hosts "testosa testosb testosc" -advanced -exverify -confirm ./ssh.sh -user oracle -hosts "testosa testosb testosc" -advanced -exverify -confirm chmod 600 /home/grid/.ssh/config chmod 600 /home/oracle/.ssh/config #检查互信 su - grid for i in testos{a,b,c};do ssh $i hostname done su - oracle for i in testos{a,b,c};do ssh $i hostname done
grid 1G*3
recovery 5G*2
data 10G*1
在安装VMware软件的操作系统上,以管理员权限打开命令行工具cmd,进入到计划存放共享磁盘的目录,如d:\vm\sharedisk下,创建共享磁盘;
C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 1GB -a lsilogic -t 4 shared-asm01.vmdk C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 1GB -a lsilogic -t 4 shared-asm02.vmdk C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 1GB -a lsilogic -t 4 shared-asm03.vmdk C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 5GB -a lsilogic -t 4 shared-asm04.vmdk C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 5GB -a lsilogic -t 4 shared-asm05.vmdk C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 10GB -a lsilogic -t 4 shared-asm06.vmdk # dir /b shared-asm01-flat.vmdk shared-asm01.vmdk shared-asm02-flat.vmdk shared-asm02.vmdk shared-asm03-flat.vmdk shared-asm03.vmdk shared-asm04-flat.vmdk shared-asm04.vmdk shared-asm05-flat.vmdk shared-asm05.vmdk shared-asm06-flat.vmdk shared-asm06.vmdk
挂载共享存储(每台主机都操作)
检查虚拟机配置文件
testosa.vmx
testosb.vmx
testosc.vmx
#保证配置文件中有以下内容
disk.locking = "FALSE"
disk.EnableUUID = "TRUE"
启动每台服务器检查磁盘信息
[root@testosa ~]# fdisk -l|grep 'Disk /dev/s'|sort Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors Disk /dev/sdb: 214.7 GB, 214748364800 bytes, 419430400 sectors Disk /dev/sdc: 1073 MB, 1073741824 bytes, 2097152 sectors Disk /dev/sdd: 1073 MB, 1073741824 bytes, 2097152 sectors Disk /dev/sde: 1073 MB, 1073741824 bytes, 2097152 sectors Disk /dev/sdf: 5368 MB, 5368709120 bytes, 10485760 sectors Disk /dev/sdg: 5368 MB, 5368709120 bytes, 10485760 sectors Disk /dev/sdh: 10.7 GB, 10737418240 bytes, 20971520 sectors [root@testosa ~]# [root@testosb ~]# fdisk -l|grep 'Disk /dev/s'|sort Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors Disk /dev/sdb: 214.7 GB, 214748364800 bytes, 419430400 sectors Disk /dev/sdc: 1073 MB, 1073741824 bytes, 2097152 sectors Disk /dev/sdd: 1073 MB, 1073741824 bytes, 2097152 sectors Disk /dev/sde: 1073 MB, 1073741824 bytes, 2097152 sectors Disk /dev/sdf: 5368 MB, 5368709120 bytes, 10485760 sectors Disk /dev/sdg: 5368 MB, 5368709120 bytes, 10485760 sectors Disk /dev/sdh: 10.7 GB, 10737418240 bytes, 20971520 sectors [root@testosb ~]# [root@testosc ~]# fdisk -l|grep 'Disk /dev/s'|sort Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors Disk /dev/sdb: 214.7 GB, 214748364800 bytes, 419430400 sectors Disk /dev/sdc: 1073 MB, 1073741824 bytes, 2097152 sectors Disk /dev/sdd: 1073 MB, 1073741824 bytes, 2097152 sectors Disk /dev/sde: 1073 MB, 1073741824 bytes, 2097152 sectors Disk /dev/sdf: 5368 MB, 5368709120 bytes, 10485760 sectors Disk /dev/sdg: 5368 MB, 5368709120 bytes, 10485760 sectors Disk /dev/sdh: 10.7 GB, 10737418240 bytes, 20971520 sectors [root@testosc ~]#
raw (裸设备)
使用不方便
asmlib
Oracle推出的,解决裸设备不方便的问题
udev
动态设备管理
本次使用asmlib方式
下载工具包
cd /etc/yum.repos.d
wget https://public-yum.oracle.com/public-yum-ol7.repo
https://public-yum.oracle.com/repo/OracleLinux/OL7/latest/x86_64/getPackage/oracleasm-support-2.1.11-2.el7.x86_64.rpm
https://download.oracle.com/otn_software/asmlib/oracleasmlib-2.0.12-1.el7.x86_64.rpm
上传asmlib工具的安装包
oracleasmlib-2.0.12-1.el7.x86_64.rpm
oracleasm-support-2.1.11-2.el7.x86_64.rpm
安装
# kmod-oracleasm provides the oracleasm kernel module (install on every node).
yum install -y kmod-oracleasm
# ASMLib support tools and user-space library, uploaded earlier.
rpm -ivh oracleasm-support-2.1.11-2.el7.x86_64.rpm
rpm -ivh oracleasmlib-2.0.12-1.el7.x86_64.rpm
#挂载oracleasm模块 [root@testosa ~]# oracleasm init Creating /dev/oracleasm mount point: /dev/oracleasm Loading module "oracleasm": oracleasm Configuring "oracleasm" to use device physical block size Mounting ASMlib driver filesystem: /dev/oracleasm [root@testosa ~]# #配置oracleasm驱动 [root@testosa ~]# oracleasm configure -i Configuring the Oracle ASM library driver. This will configure the on-boot properties of the Oracle ASM library driver. The following questions will determine whether the driver is loaded on boot and what permissions it will have. The current values will be shown in brackets ('[]'). Hitting <ENTER> without typing an answer will keep that current value. Ctrl-C will abort. Default user to own the driver interface []: grid Default group to own the driver interface []: asmadmin Start Oracle ASM library driver on boot (y/n) [n]: y Scan for Oracle ASM disks on boot (y/n) [y]: y Writing Oracle ASM library driver configuration: done [root@testosa ~]#
#创建一个分区就行 fdisk /dev/sdc fdisk /dev/sdd fdisk /dev/sde fdisk /dev/sdf fdisk /dev/sdg fdisk /dev/sdh #查看创建的分区(sdc1~sdh1就是刚刚创建的分区) [root@testosa ~]# ls -lsa /dev/sd*1 0 brw-rw---- 1 root disk 8, 1 Aug 27 11:54 /dev/sda1 0 brw-rw---- 1 root disk 8, 33 Aug 27 14:26 /dev/sdc1 0 brw-rw---- 1 root disk 8, 49 Aug 27 14:28 /dev/sdd1 0 brw-rw---- 1 root disk 8, 65 Aug 27 14:28 /dev/sde1 0 brw-rw---- 1 root disk 8, 81 Aug 27 14:29 /dev/sdf1 0 brw-rw---- 1 root disk 8, 97 Aug 27 14:29 /dev/sdg1 0 brw-rw---- 1 root disk 8, 113 Aug 27 14:29 /dev/sdh1 #如果建立了分区但是查询不到使用以下命令处理 kpartx -a /dev/sdc OR: partprobe /dev/sdc
oracleasm createdisk grid01 /dev/sdc1 oracleasm createdisk grid02 /dev/sdd1 oracleasm createdisk grid03 /dev/sde1 oracleasm createdisk recovery01 /dev/sdf1 oracleasm createdisk recovery02 /dev/sdg1 oracleasm createdisk data01 /dev/sdh1 [root@testosa ~]# oracleasm createdisk grid01 /dev/sdc1 Writing disk header: done Instantiating disk: done [root@testosa ~]# oracleasm createdisk grid02 /dev/sdd1 Writing disk header: done Instantiating disk: done [root@testosa ~]# oracleasm createdisk grid03 /dev/sde1 Writing disk header: done Instantiating disk: done [root@testosa ~]# oracleasm createdisk recovery01 /dev/sdf1 Writing disk header: done Instantiating disk: done [root@testosa ~]# oracleasm createdisk recovery02 /dev/sdg1 Writing disk header: done Instantiating disk: done [root@testosa ~]# oracleasm createdisk data01 /dev/sdh1 Writing disk header: done Instantiating disk: done [root@testosa ~]# #查看创建的磁盘(注意权限) [root@testosa disks]# pwd /dev/oracleasm/disks [root@testosa disks]# ll -lsa total 0 0 drwxr-xr-x 1 root root 0 Aug 27 14:38 . 0 drwxr-xr-x 4 root root 0 Aug 27 14:38 .. 0 brw-rw---- 1 grid asmadmin 8, 113 Aug 27 14:40 DATA01 0 brw-rw---- 1 grid asmadmin 8, 33 Aug 27 14:40 GRID01 0 brw-rw---- 1 grid asmadmin 8, 49 Aug 27 14:40 GRID02 0 brw-rw---- 1 grid asmadmin 8, 65 Aug 27 14:40 GRID03 0 brw-rw---- 1 grid asmadmin 8, 81 Aug 27 14:40 RECOVERY01 0 brw-rw---- 1 grid asmadmin 8, 97 Aug 27 14:40 RECOVERY02
vim /etc/sysconfig/oracleasm [root@testosa ~]# cat /etc/sysconfig/oracleasm|sed '/^$/d' # # This is a configuration file for automatic loading of the Oracle # Automatic Storage Management library kernel driver. It is generated # By running /etc/init.d/oracleasm configure. Please use that method # to modify this file # # ORACLEASM_ENABLED: 'true' means to load the driver on boot. ORACLEASM_ENABLED=true # ORACLEASM_UID: Default user owning the /dev/oracleasm mount point. ORACLEASM_UID=grid # ORACLEASM_GID: Default group owning the /dev/oracleasm mount point. ORACLEASM_GID=asmadmin # ORACLEASM_SCANBOOT: 'true' means scan for ASM disks on boot. ORACLEASM_SCANBOOT=true # ORACLEASM_SCANORDER: Matching patterns to order disk scanning ORACLEASM_SCANORDER="" # ORACLEASM_SCANEXCLUDE: Matching patterns to exclude disks from scan ORACLEASM_SCANEXCLUDE="sda sdb" # ORACLEASM_SCAN_DIRECTORIES: Scan disks under these directories ORACLEASM_SCAN_DIRECTORIES="" # ORACLEASM_USE_LOGICAL_BLOCK_SIZE: 'true' means use the logical block size # reported by the underlying disk instead of the physical. The default # is 'false' ORACLEASM_USE_LOGICAL_BLOCK_SIZE=false [root@testosa ~]# #扫描磁盘 [root@testosa ~]# oracleasm scandisks Reloading disk partitions: done Cleaning any stale ASM disks... Scanning system for ASM disks... [root@testosa ~]# #显示磁盘 [root@testosa ~]# oracleasm listdisks DATA01 GRID01 GRID02 GRID03 RECOVERY01 RECOVERY02 [root@testosa ~]#
以上创建分区、asmlib创建磁盘都是在一个节点操作即可
#其他节点开机 #查看分区信息 [root@testosb ~]# ll /dev/sd?? brw-rw---- 1 root disk 8, 1 Aug 27 11:55 /dev/sda1 brw-rw---- 1 root disk 8, 2 Aug 27 11:55 /dev/sda2 brw-rw---- 1 root disk 8, 33 Aug 27 14:48 /dev/sdc1 brw-rw---- 1 root disk 8, 49 Aug 27 14:48 /dev/sdd1 brw-rw---- 1 root disk 8, 65 Aug 27 14:48 /dev/sde1 brw-rw---- 1 root disk 8, 81 Aug 27 14:48 /dev/sdf1 brw-rw---- 1 root disk 8, 97 Aug 27 14:48 /dev/sdg1 brw-rw---- 1 root disk 8, 113 Aug 27 14:48 /dev/sdh1 #配置asmlib驱动 [root@testosb ~]# oracleasm configure -i Configuring the Oracle ASM library driver. This will configure the on-boot properties of the Oracle ASM library driver. The following questions will determine whether the driver is loaded on boot and what permissions it will have. The current values will be shown in brackets ('[]'). Hitting <ENTER> without typing an answer will keep that current value. Ctrl-C will abort. Default user to own the driver interface []: grid Default group to own the driver interface []: asmadmin Start Oracle ASM library driver on boot (y/n) [n]: y Scan for Oracle ASM disks on boot (y/n) [y]: y Writing Oracle ASM library driver configuration: done #修改asmlib配置文件,排除扫描系统自身的磁盘 [root@testosb ~]# cat /etc/sysconfig/oracleasm|sed -r '/^$|^#/d' ORACLEASM_ENABLED=true ORACLEASM_UID=grid ORACLEASM_GID=asmadmin ORACLEASM_SCANBOOT=true ORACLEASM_SCANORDER="" ORACLEASM_SCANEXCLUDE="sda sdb" ORACLEASM_SCAN_DIRECTORIES="" ORACLEASM_USE_LOGICAL_BLOCK_SIZE=false [root@testosb ~]# #扫描磁盘 oracleasm scandisks #显示磁盘 oracleasm listdisks #检查磁盘信息(注意权限) [root@testosb ~]# ll /dev/oracleasm/disks/ total 0 brw-rw---- 1 grid asmadmin 8, 113 Aug 27 14:48 DATA01 brw-rw---- 1 grid asmadmin 8, 33 Aug 27 14:48 GRID01 brw-rw---- 1 grid asmadmin 8, 49 Aug 27 14:48 GRID02 brw-rw---- 1 grid asmadmin 8, 65 Aug 27 14:48 GRID03 brw-rw---- 1 grid asmadmin 8, 81 Aug 27 14:48 RECOVERY01 brw-rw---- 1 grid asmadmin 8, 97 Aug 27 14:48 RECOVERY02 [root@testosb ~]#
#软件下载
p13390677_112040_Linux-x86-64_3of7.zip
#切换到grid用户
su - grid
#软件解压(grid用户操作)
unzip p13390677_112040_Linux-x86-64_3of7.zip
#切换root用户,先安装一个磁盘检测工具(在解压的文件夹里面找,其他节点也需要安装)
grid/rpm/cvuqdisk-1.0.9-1.rpm
以grid用户启动vncserver
设置的密码为:rootroot
[grid@testosa:/soft]$vncserver You will require a password to access your desktops. Password: Password must be at least 6 characters - try again Password: Verify: Would you like to enter a view-only password (y/n)? n A view-only password is not used xauth: file /home/grid/.Xauthority does not exist New 'testosa:1 (grid)' desktop is testosa:1 Creating default startup script /home/grid/.vnc/xstartup Creating default config /home/grid/.vnc/config Starting applications specified in /home/grid/.vnc/xstartup Log file is /home/grid/.vnc/testosa:1.log
远程使用vnc客户端,调用vncserver使用图形化界面安装
#注意使用-jreLoc选项,否则界面里面,提示框不能放大
./runInstaller -jreLoc /etc/alternatives/jre_1.8.0
如上这里的scanname一定要和配置的hosts里面一致:
[root@testosa ~]# cat /etc/hosts 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 192.168.1.81 testosa 192.168.1.82 testosb 192.168.1.83 testosc 10.10.100.81 aprv 10.10.100.82 bprv 10.10.100.83 cprv 192.168.1.84 avip 192.168.1.85 bvip 192.168.1.86 cvip 192.168.1.80 scanip
检查ssh信任关系(每个节点都检查)
设置密码oracle
尝试自动修复
根据提示操作如下
[root@testosa ~]# /tmp/CVU_11.2.0.4.0_grid/runfixup.sh Response file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.response Enable file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.enable Log file location: /tmp/CVU_11.2.0.4.0_grid/orarun.log Setting Kernel Parameters... kernel.shmmax = 1610612736 kernel.shmmax = 1977098240 kernel.shmall = 393216 kernel.shmall = 2097152 Installing Package /tmp/CVU_11.2.0.4.0_grid//cvuqdisk-1.0.9-1.rpm Preparing... ################################# [100%] package cvuqdisk-1.0.10-1.x86_64 (which is newer than cvuqdisk-1.0.9-1.x86_64) is already installed [root@testosa ~]# [root@testosb ~]# /tmp/CVU_11.2.0.4.0_grid/runfixup.sh Response file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.response Enable file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.enable Log file location: /tmp/CVU_11.2.0.4.0_grid/orarun.log Setting Kernel Parameters... kernel.shmmax = 1610612736 kernel.shmmax = 1977094144 kernel.shmall = 393216 kernel.shmall = 2097152 Installing Package /tmp/CVU_11.2.0.4.0_grid//cvuqdisk-1.0.9-1.rpm Preparing... ################################# [100%] package cvuqdisk-1.0.10-1.x86_64 (which is newer than cvuqdisk-1.0.9-1.x86_64) is already installed [root@testosb ~]# [root@testosc ~]# /tmp/CVU_11.2.0.4.0_grid/runfixup.sh Response file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.response Enable file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.enable Log file location: /tmp/CVU_11.2.0.4.0_grid/orarun.log Setting Kernel Parameters... kernel.shmmax = 1610612736 kernel.shmmax = 1977094144 kernel.shmall = 393216 kernel.shmall = 2097152 Installing Package /tmp/CVU_11.2.0.4.0_grid//cvuqdisk-1.0.9-1.rpm Preparing... ################################# [100%] package cvuqdisk-1.0.10-1.x86_64 (which is newer than cvuqdisk-1.0.9-1.x86_64) is already installed [root@testosc ~]#
执行完回来点OK,继续检查约束条件
安装pdksh-5.2.14-37.el5.x86_64.rpm包(每个节点都安装)
[root@testosa soft]# rpm -ivh pdksh-5.2.14-37.el5.x86_64.rpm
warning: pdksh-5.2.14-37.el5.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID e8562897: NOKEY
error: Failed dependencies:
pdksh conflicts with (installed) ksh-20120801-139.el7.x86_64
[root@testosa soft]# rpm -evh ksh-20120801-139.el7.x86_64
Preparing... ################################# [100%]
Cleaning up / removing...
1:ksh-20120801-139.el7 ################################# [100%]
[root@testosa soft]# rpm -ivh pdksh-5.2.14-37.el5.x86_64.rpm
warning: pdksh-5.2.14-37.el5.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID e8562897: NOKEY
Preparing... ################################# [100%]
Updating / installing...
1:pdksh-5.2.14-37.el5 ################################# [100%]
Device Checks for ASM
Device Checks for ASM - This is a pre-check to verify if the specified devices meet the requirements for configuration through the Oracle Universal Storage Manager Configuration Assistant. Error: - testosc:PRVF-7533 : Proper version of package "cvuqdisk" is not found on node "testosc" [Required = "1.0.9-1" ; Found = "1.0.10-1"]. - Cause: Cause Of Problem Not Available - Action: User Action Not Available - testosc:PRVF-7533 : Proper version of package "cvuqdisk" is not found on node "testosc" [Required = "1.0.9-1" ; Found = "1.0.10-1"]. - Cause: Cause Of Problem Not Available - Action: User Action Not Available - testosc:PRVF-7533 : Proper version of package "cvuqdisk" is not found on node "testosc" [Required = "1.0.9-1" ; Found = "1.0.10-1"]. - Cause: Cause Of Problem Not Available - Action: User Action Not Available Operation Failed on Nodes: [testosc, testosb, testosa] Verification result of failed node: testosc Details: - Unable to determine the shareability of device /dev/oracleasm/disks/GRID01 on nodes: testosa,testosb,testosc - Cause: Cause Of Problem Not Available - Action: User Action Not Available - PRVF-9802 : Attempt to get udev info from node "testosc" failed - Cause: Attempt to read the udev permissions file failed, probably due to missing permissions directory, missing or invalid permissions file, or permissions file not accessible to use account running the check. - Action: Make sure that the udev permissions directory is created, the udev permissions file is available, and it has correct read permissions for access by the user running the check. 
Back to Top Verification result of failed node: testosb Details: - Unable to determine the shareability of device /dev/oracleasm/disks/GRID01 on nodes: testosa,testosb,testosc - Cause: Cause Of Problem Not Available - Action: User Action Not Available - PRVF-9802 : Attempt to get udev info from node "testosb" failed - Cause: Attempt to read the udev permissions file failed, probably due to missing permissions directory, missing or invalid permissions file, or permissions file not accessible to use account running the check. - Action: Make sure that the udev permissions directory is created, the udev permissions file is available, and it has correct read permissions for access by the user running the check. Back to Top Verification result of failed node: testosa Details: - Unable to determine the shareability of device /dev/oracleasm/disks/GRID01 on nodes: testosa,testosb,testosc - Cause: Cause Of Problem Not Available - Action: User Action Not Available - PRVF-9802 : Attempt to get udev info from node "testosa" failed - Cause: Attempt to read the udev permissions file failed, probably due to missing permissions directory, missing or invalid permissions file, or permissions file not accessible to use account running the check. - Action: Make sure that the udev permissions directory is created, the udev permissions file is available, and it has correct read permissions for access by the user running the check.
Network Time Protocol (NTP)
Network Time Protocol (NTP) - This task verifies cluster time synchronization on clusters that use Network Time Protocol (NTP). Error: - PRVF-5507 : NTP daemon or service is not running on any node but NTP configuration file exists on the following node(s): testosa - Cause: The configuration file was found on at least one node though no NTP daemon or service was running. - Action: If you plan to use CTSS for time synchronization then NTP configuration must be uninstalled on all nodes of the cluster. Check Failed on Nodes: [testosc, testosb, testosa] Verification result of failed node: testosc Details: - PRVF-5402 : Warning: Could not find NTP configuration file "/etc/ntp.conf" on node "testosc" - Cause: NTP might not have been configured on the node, or NTP might have been configured with a configuration file different from the one indicated. - Action: Configure NTP on the node if not done so yet. Refer to your NTP vendor documentation for details. Back to Top Verification result of failed node: testosb Details: - PRVF-5402 : Warning: Could not find NTP configuration file "/etc/ntp.conf" on node "testosb" - Cause: NTP might not have been configured on the node, or NTP might have been configured with a configuration file different from the one indicated. - Action: Configure NTP on the node if not done so yet. Refer to your NTP vendor documentation for details. Back to Top Verification result of failed node: testosa #如上删除主节点的 /etc/ntp.conf 文件
这个报错可以忽略
报错信息
Remote 'AttachHome' failed on nodes: 'testosb,testosc'. Refer to '/oracle/app/oraInventory/logs/installActions2023-08-27_04-26-34PM.log' for details.
It is recommended that the following command needs to be manually run on the failed nodes:
/oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=<node on which command is to be run>.
Please refer 'AttachHome' logs under central inventory of remote nodes where failure occurred for more details.
查看日志
INFO: Invoking OUI on cluster nodes testosb INFO: /tmp/OraInstall2023-08-27_04-26-34PM/oui/bin/platform/linux64/runInstaller -jreLoc /etc/alternatives/jre_1.8.0 -paramFile /tmp/OraInstall2023-08-27_04-26-34PM/oui/clusterparam.ini -silent -ignoreSysPrereqs -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosb -remoteInvocation -invokingNodeName testosa -logFilePath "/oracle/app/oraInventory/logs" -timestamp 2023-08-27_04-26-34PM SEVERE: oracle.sysman.oii.oiip.oiipg.OiipgRemoteOpsException: Error occured while trying to run Unix command /tmp/OraInstall2023-08-27_04-26-34PM/oui/bin/platform/linux64/runInstaller -jreLoc /etc/alternatives/jre_1.8.0 -paramFile /tmp/OraInstall2023-08-27_04-26-34PM/oui/clusterparam.ini -silent -ignoreSysPrereqs -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosb -remoteInvocation -invokingNodeName testosa -logFilePath "/oracle/app/oraInventory/logs" -timestamp 2023-08-27_04-26-34PM on nodes testosb. [Starting Oracle Universal Installer...Checking swap space: must be greater than 500 MB. Actual 7628 MB PassedThe Java RunTime Environment was not found at /etc/alternatives/jre_1.8.0/bin/java. 
Hence, the Oracle Universal Installer cannot be run.Please visit http://www.javasoft.com and install JRE version 1.3.1 or higher and try again.: No such file or directory :failed] at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmdOnUnix(OiipgClusterRunCmd.java:276) at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runAnyCmdOnNodes(OiipgClusterRunCmd.java:369) at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmd(OiipgClusterRunCmd.java:314) at oracle.sysman.oii.oiic.OiicBaseInventoryApp.runRemoteInvOpCmd(OiicBaseInventoryApp.java:281) at oracle.sysman.oii.oiic.OiicAttachHome.clsCmdAttachHome(OiicAttachHome.java:507) at oracle.sysman.oii.oiif.oiifw.OiifwClusterSaveInventoryWCCE.doOperation(OiifwClusterSaveInventoryWCCE.java:310) at oracle.sysman.oii.oiif.oiifb.OiifbCondIterator.iterate(OiifbCondIterator.java:171) at oracle.sysman.oii.oiif.oiifw.OiifwActionsPhaseWCDE.doOperation(OiifwActionsPhaseWCDE.java:641) at oracle.sysman.oii.oiif.oiifb.OiifbLinearIterator.iterate(OiifbLinearIterator.java:147) at oracle.sysman.oii.oiic.OiicInstallAPISession$OiicAPISelCompsInstall.doOperation(OiicInstallAPISession.java:1095) at oracle.sysman.oii.oiif.oiifb.OiifbCondIterator.iterate(OiifbCondIterator.java:171) at oracle.sysman.oii.oiic.OiicInstallAPISession.doInstallAction(OiicInstallAPISession.java:679) at oracle.sysman.oii.oiic.OiicInstallAPISession.access$000(OiicInstallAPISession.java:94) at oracle.sysman.oii.oiic.OiicInstallAPISession$OiicActionsThread.run(OiicInstallAPISession.java:971) Caused by: oracle.ops.mgmt.cluster.ClusterException: Starting Oracle Universal Installer...Checking swap space: must be greater than 500 MB. Actual 7628 MB PassedThe Java RunTime Environment was not found at /etc/alternatives/jre_1.8.0/bin/java. 
Hence, the Oracle Universal Installer cannot be run.Please visit http://www.javasoft.com and install JRE version 1.3.1 or higher and try again.: No such file or directory :failed at oracle.ops.mgmt.cluster.ClusterCmd.runCmd(ClusterCmd.java:2149) at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmdOnUnix(OiipgClusterRunCmd.java:270) ... 13 more INFO: Running command '/tmp/OraInstall2023-08-27_04-26-34PM/oui/bin/platform/linux64/runInstaller -jreLoc /etc/alternatives/jre_1.8.0 -paramFile /tmp/OraInstall2023-08-27_04-26-34PM/oui/clusterparam.ini -silent -ignoreSysPrereqs -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosc -remoteInvocation -invokingNodeName testosa -logFilePath "/oracle/app/oraInventory/logs" -timestamp 2023-08-27_04-26-34PM' on the nodes 'testosc'. INFO: Invoking OUI on cluster nodes testosc INFO: /tmp/OraInstall2023-08-27_04-26-34PM/oui/bin/platform/linux64/runInstaller -jreLoc /etc/alternatives/jre_1.8.0 -paramFile /tmp/OraInstall2023-08-27_04-26-34PM/oui/clusterparam.ini -silent -ignoreSysPrereqs -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosc -remoteInvocation -invokingNodeName testosa -logFilePath "/oracle/app/oraInventory/logs" -timestamp 2023-08-27_04-26-34PM SEVERE: oracle.sysman.oii.oiip.oiipg.OiipgRemoteOpsException: Error occured while trying to run Unix command /tmp/OraInstall2023-08-27_04-26-34PM/oui/bin/platform/linux64/runInstaller -jreLoc /etc/alternatives/jre_1.8.0 -paramFile /tmp/OraInstall2023-08-27_04-26-34PM/oui/clusterparam.ini -silent -ignoreSysPrereqs -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc 
"INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosc -remoteInvocation -invokingNodeName testosa -logFilePath "/oracle/app/oraInventory/logs" -timestamp 2023-08-27_04-26-34PM on nodes testosc. [Starting Oracle Universal Installer...Checking swap space: must be greater than 500 MB. Actual 7628 MB PassedThe Java RunTime Environment was not found at /etc/alternatives/jre_1.8.0/bin/java. Hence, the Oracle Universal Installer cannot be run.Please visit http://www.javasoft.com and install JRE version 1.3.1 or higher and try again.: No such file or directory :failed] at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmdOnUnix(OiipgClusterRunCmd.java:276) at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runAnyCmdOnNodes(OiipgClusterRunCmd.java:369) at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmd(OiipgClusterRunCmd.java:314) at oracle.sysman.oii.oiic.OiicBaseInventoryApp.runRemoteInvOpCmd(OiicBaseInventoryApp.java:281) at oracle.sysman.oii.oiic.OiicAttachHome.clsCmdAttachHome(OiicAttachHome.java:507) at oracle.sysman.oii.oiif.oiifw.OiifwClusterSaveInventoryWCCE.doOperation(OiifwClusterSaveInventoryWCCE.java:310) at oracle.sysman.oii.oiif.oiifb.OiifbCondIterator.iterate(OiifbCondIterator.java:171) at oracle.sysman.oii.oiif.oiifw.OiifwActionsPhaseWCDE.doOperation(OiifwActionsPhaseWCDE.java:641) at oracle.sysman.oii.oiif.oiifb.OiifbLinearIterator.iterate(OiifbLinearIterator.java:147) at oracle.sysman.oii.oiic.OiicInstallAPISession$OiicAPISelCompsInstall.doOperation(OiicInstallAPISession.java:1095) at oracle.sysman.oii.oiif.oiifb.OiifbCondIterator.iterate(OiifbCondIterator.java:171) at oracle.sysman.oii.oiic.OiicInstallAPISession.doInstallAction(OiicInstallAPISession.java:679) at oracle.sysman.oii.oiic.OiicInstallAPISession.access$000(OiicInstallAPISession.java:94) at oracle.sysman.oii.oiic.OiicInstallAPISession$OiicActionsThread.run(OiicInstallAPISession.java:971) Caused by: oracle.ops.mgmt.cluster.ClusterException: Starting Oracle Universal 
Installer...Checking swap space: must be greater than 500 MB. Actual 7628 MB PassedThe Java RunTime Environment was not found at /etc/alternatives/jre_1.8.0/bin/java. Hence, the Oracle Universal Installer cannot be run.Please visit http://www.javasoft.com and install JRE version 1.3.1 or higher and try again.: No such file or directory :failed at oracle.ops.mgmt.cluster.ClusterCmd.runCmd(ClusterCmd.java:2149) at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmdOnUnix(OiipgClusterRunCmd.java:270) ... 13 more SEVERE: Remote 'AttachHome' failed on nodes: 'testosb,testosc'. Refer to '/oracle/app/oraInventory/logs/installActions2023-08-27_04-26-34PM.log' for details. It is recommended that the following command needs to be manually run on the failed nodes: /oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=<node on which command is to be run>. Please refer 'AttachHome' logs under central inventory of remote nodes where failure occurred for more details.
根据建议执行命令
/oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosb /oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosc [grid@testosb:/home/grid]$/oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosb Starting Oracle Universal Installer... Checking swap space: must be greater than 500 MB. Actual 7628 MB Passed The Java RunTime Environment was not found at /etc/alternatives/jre_1.8.0/bin/java. Hence, the Oracle Universal Installer cannot be run. Please visit http://www.javasoft.com and install JRE version 1.3.1 or higher and try again. 
: No such file or directory [grid@testosb:/home/grid]$ #检查java [root@testosa ~]# java -version openjdk version "1.8.0_382" OpenJDK Runtime Environment (build 1.8.0_382-b05) OpenJDK 64-Bit Server VM (build 25.382-b05, mixed mode) [root@testosa ~]# ll /etc/alternatives/jre_1.8.0/bin/java -rwxr-xr-x 1 root root 8984 Aug 11 22:45 /etc/alternatives/jre_1.8.0/bin/java [root@testosb ~]# which java /usr/bin/which: no java in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin) [root@testosb ~]# ll /etc/alternatives/jre_1.8.0/bin/java ls: cannot access /etc/alternatives/jre_1.8.0/bin/java: No such file or directory [root@testosb ~]# [root@testosc ~]# which java /usr/bin/which: no java in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin) [root@testosc ~]# ll /etc/alternatives/jre_1.8.0/bin/java ls: cannot access /etc/alternatives/jre_1.8.0/bin/java: No such file or directory [root@testosc ~]# #其他两个节点安装java cd /etc/alternatives/ tar -zxvf /soft/jre-8u202-linux-x64.tar.gz mv /etc/alternatives/jre1.8.0_202 /etc/alternatives/jre_1.8.0 echo 'export PATH=$PATH:/etc/alternatives/jre_1.8.0/bin:$HOME/bin' >> /etc/profile source /etc/profile #重新执行建议的命令 [grid@testosb:/home/grid]$/oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosb Starting Oracle Universal Installer... Checking swap space: must be greater than 500 MB. Actual 7628 MB Passed The inventory pointer is located at /etc/oraInst.loc The inventory is located at /oracle/app/oraInventory Please execute the '/oracle/app/oraInventory/orainstRoot.sh' script at the end of the session. 'AttachHome' was successful. 
[grid@testosb:/home/grid]$ [grid@testosc:/home/grid]$/oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosc Starting Oracle Universal Installer... Checking swap space: must be greater than 500 MB. Actual 7628 MB Passed The inventory pointer is located at /etc/oraInst.loc The inventory is located at /oracle/app/oraInventory Please execute the '/oracle/app/oraInventory/orainstRoot.sh' script at the end of the session. 'AttachHome' was successful. [grid@testosc:/home/grid]$
执行修复后点OK
如上根据提示执行脚本
顺序执行 先在节点a上执行完毕之后,再在其他节点执行
[root@testosa ~]# /oracle/app/oraInventory/orainstRoot.sh
Changing permissions of /oracle/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /oracle/app/oraInventory to oinstall.
The execution of the script is complete.
[root@testosa ~]#
执行root脚本报错,报错如下
ohasd failed to start
Failed to start the Clusterware. Last 20 lines of the alert log follow:
2023-08-27 18:37:06.861:
[client(90262)]CRS-2101:The OLR was formatted using version 3.
原因:CentOS 7 使用 systemd 而不是 init.d 来运行和管理进程,而 root.sh 仍按传统的 init.d 方式启动 ohasd 进程,因此 ohasd 无法被拉起。
解决方法:在 CentOS 7 中,需要在运行 root.sh 脚本之前,先把 ohasd 注册为一个 systemd 服务。(注意:systemd 的 ExecStart 并不经过 shell,下面服务文件里的 `>/dev/null 2>&1` 会被当作参数原样传给 init.ohasd——从后面 systemctl status 的 CGroup 输出可以看到;init.ohasd 会忽略多余参数,服务仍能正常运行,但更严谨的写法是去掉重定向。另外 unit 文件权限建议用 644 而不是 777。)
#以root用户创建服务文件 cat > /usr/lib/systemd/system/ohas.service << "EOF" [Unit] Description=Oracle High Availability Services After=syslog.target [Service] ExecStart=/etc/init.d/init.ohasd run >/dev/null 2>&1 Type=simple Restart=always [Install] WantedBy=multi-user.target EOF chmod 777 /usr/lib/systemd/system/ohas.service systemctl daemon-reload systemctl enable ohas.service systemctl start ohas.service #查看ohas服务状态 [root@testosa ~]# systemctl status ohas.service * ohas.service - Oracle High Availability Services Loaded: loaded (/usr/lib/systemd/system/ohas.service; enabled; vendor preset: disabled) Active: active (running) since Sun 2023-08-27 18:48:57 CST; 6s ago Main PID: 91992 (init.ohasd) Tasks: 1 CGroup: /system.slice/ohas.service `-91992 /bin/sh /etc/init.d/init.ohasd run >/dev/null 2>&1 Type=simple Aug 27 18:48:57 testosa systemd[1]: Started Oracle High Availability Services. [root@testosa ~]#
重新执行root脚本
[root@testosa ~]# /oracle/app/11.2.0/grid/root.sh Performing root user operation for Oracle 11g The following environment variables are set as: ORACLE_OWNER= grid ORACLE_HOME= /oracle/app/11.2.0/grid Enter the full pathname of the local bin directory: [/usr/local/bin]: The contents of "dbhome" have not changed. No need to overwrite. The contents of "oraenv" have not changed. No need to overwrite. The contents of "coraenv" have not changed. No need to overwrite. Entries will be added to the /etc/oratab file as needed by Database Configuration Assistant when a database is created Finished running generic part of root script. Now product-specific root actions will be performed. Using configuration parameter file: /oracle/app/11.2.0/grid/crs/install/crsconfig_params User ignored Prerequisites during installation Installing Trace File Analyzer CRS-2672: Attempting to start 'ora.mdnsd' on 'testosa' CRS-2676: Start of 'ora.mdnsd' on 'testosa' succeeded CRS-2672: Attempting to start 'ora.gpnpd' on 'testosa' CRS-2676: Start of 'ora.gpnpd' on 'testosa' succeeded CRS-2672: Attempting to start 'ora.cssdmonitor' on 'testosa' CRS-2672: Attempting to start 'ora.gipcd' on 'testosa' CRS-2676: Start of 'ora.cssdmonitor' on 'testosa' succeeded CRS-2676: Start of 'ora.gipcd' on 'testosa' succeeded CRS-2672: Attempting to start 'ora.cssd' on 'testosa' CRS-2672: Attempting to start 'ora.diskmon' on 'testosa' CRS-2676: Start of 'ora.diskmon' on 'testosa' succeeded CRS-2676: Start of 'ora.cssd' on 'testosa' succeeded ASM created and started successfully. Disk Group grid created successfully. clscfg: -install mode specified Successfully accumulated necessary OCR keys. Creating OCR keys for user 'root', privgrp 'root'.. Operation successful. CRS-4256: Updating the profile Successful addition of voting disk 408bcc1da0314fcbbf945af1c73754bc. Successful addition of voting disk 14ebdd6a82984fffbf611eb50607a32f. Successful addition of voting disk 46e5bed09bd34fccbffa6bb39e5f694c. 
Successfully replaced voting disk group with +grid. CRS-4256: Updating the profile CRS-4266: Voting file(s) successfully replaced ## STATE File Universal Id File Name Disk group -- ----- ----------------- --------- --------- 1. ONLINE 408bcc1da0314fcbbf945af1c73754bc (/dev/oracleasm/disks/GRID01) [GRID] 2. ONLINE 14ebdd6a82984fffbf611eb50607a32f (/dev/oracleasm/disks/GRID02) [GRID] 3. ONLINE 46e5bed09bd34fccbffa6bb39e5f694c (/dev/oracleasm/disks/GRID03) [GRID] Located 3 voting disk(s). CRS-2672: Attempting to start 'ora.asm' on 'testosa' CRS-2676: Start of 'ora.asm' on 'testosa' succeeded CRS-2672: Attempting to start 'ora.GRID.dg' on 'testosa' CRS-2676: Start of 'ora.GRID.dg' on 'testosa' succeeded Configure Oracle Grid Infrastructure for a Cluster ... succeeded [root@testosa ~]#
同样在b、c节点执行脚本,第一次执行root脚本都会报错,报错后终止执行,启动ohas后重新执行即可
脚本执行完成后,点ok
这个报错是因为我们没有使用DNS做解析,可以忽略
grid用户下
使用命令asmca
磁盘组创建好之后,点右下角的 Exit 退出即可
配置root环境变量
#把grid用户的Oracle_Home路径加入到每个节点的root环境变量里 [grid@testosa:/home/grid]$env|grep ORACLE_HOME ORACLE_HOME=/oracle/app/11.2.0/grid [root@testosa ~]# grep PATH ~/.bash_profile PATH=$PATH:/oracle/app/11.2.0/grid/bin:$HOME/bin export PATH [root@testosa ~]# source ~/.bash_profile [root@testosb ~]# grep PATH ~/.bash_profile PATH=$PATH:/oracle/app/11.2.0/grid/bin:$HOME/bin export PATH [root@testosb ~]# [root@testosc ~]# grep PATH ~/.bash_profile PATH=$PATH:/oracle/app/11.2.0/grid/bin:$HOME/bin export PATH [root@testosc ~]#
检查ocr信息
[root@testosa ~]# ocrcheck Status of Oracle Cluster Registry is as follows : Version : 3 Total space (kbytes) : 262120 Used space (kbytes) : 2748 Available space (kbytes) : 259372 ID : 1332412767 Device/File Name : +grid Device/File integrity check succeeded Device/File not configured Device/File not configured Device/File not configured Device/File not configured Cluster registry integrity check succeeded Logical corruption check succeeded [root@testosa ~]# #为ocr做个镜像 [root@testosa ~]# ocrconfig -add +recovery [root@testosa ~]# [root@testosa ~]# ocrcheck Status of Oracle Cluster Registry is as follows : Version : 3 Total space (kbytes) : 262120 Used space (kbytes) : 2748 Available space (kbytes) : 259372 ID : 1332412767 Device/File Name : +grid Device/File integrity check succeeded Device/File Name : +recovery Device/File integrity check succeeded Device/File not configured Device/File not configured Device/File not configured Cluster registry integrity check succeeded Logical corruption check succeeded [root@testosa ~]#
上传安装包
p13390677_112040_Linux-x86-64_1of7.zip
p13390677_112040_Linux-x86-64_2of7.zip
oracle用户解压安装包
su - oracle unzip p13390677_112040_Linux-x86-64_1of7.zip unzip p13390677_112040_Linux-x86-64_2of7.zip #启动vnc [oracle@testosa:/soft]$vncserver You will require a password to access your desktops. Password: Verify: Would you like to enter a view-only password (y/n)? m A view-only password is not used xauth: file /home/oracle/.Xauthority does not exist New 'testosa:6 (oracle)' desktop is testosa:6 Creating default startup script /home/oracle/.vnc/xstartup Creating default config /home/oracle/.vnc/config Starting applications specified in /home/oracle/.vnc/xstartup Log file is /home/oracle/.vnc/testosa:6.log [oracle@testosa:/soft]$
远程使用vnc客户端调用图形化界面进行安装
cd database
./runInstaller -jreLoc /etc/alternatives/jre_1.8.0
检查各个节点的互信:
Single Client Access Name (SCAN) - This test verifies the Single Client Access Name configuration. Error: - PRVG-1101 : SCAN name "scanip" failed to resolve - Cause: An attempt to resolve specified SCAN name to a list of IP addresses failed because SCAN could not be resolved in DNS or GNS using 'nslookup'. - Action: Check whether the specified SCAN name is correct. If SCAN name should be resolved in DNS, check the configuration of SCAN name in DNS. If it should be resolved in GNS make sure that GNS resource is online. - PRVF-4657 : Name resolution setup check for "scanip" (IP address: 192.168.1.80) failed - Cause: Inconsistent IP address definitions found for the SCAN name identified using DNS and configured name resolution mechanism(s). - Action: Look up the SCAN name with nslookup, and make sure the returned IP addresses are consistent with those defined in NIS and /etc/hosts as configured in /etc/nsswitch.conf by reconfiguring the latter. Check the Name Service Cache Daemon (/usr/sbin/nscd) by clearing its cache and restarting it. Check Failed on Nodes: [testosc, testosb, testosa] Verification result of failed node: testosc Details: - PRVF-4664 : Found inconsistent name resolution entries for SCAN name "scanip" - Cause: The nslookup utility and the configured name resolution mechanism(s), as defined in /etc/nsswitch.conf, returned inconsistent IP address information for the SCAN name identified. - Action: Check the Name Service Cache Daemon (/usr/sbin/nscd), the Domain Name Server (nslookup) and the /etc/hosts file to make sure the IP address for the SCAN names are registered correctly. Back to Top Verification result of failed node: testosb Details: - PRVF-4664 : Found inconsistent name resolution entries for SCAN name "scanip" - Cause: The nslookup utility and the configured name resolution mechanism(s), as defined in /etc/nsswitch.conf, returned inconsistent IP address information for the SCAN name identified. 
- Action: Check the Name Service Cache Daemon (/usr/sbin/nscd), the Domain Name Server (nslookup) and the /etc/hosts file to make sure the IP address for the SCAN names are registered correctly. Back to Top Verification result of failed node: testosa Details: - PRVF-4664 : Found inconsistent name resolution entries for SCAN name "scanip" - Cause: The nslookup utility and the configured name resolution mechanism(s), as defined in /etc/nsswitch.conf, returned inconsistent IP address information for the SCAN name identified. - Action: Check the Name Service Cache Daemon (/usr/sbin/nscd), the Domain Name Server (nslookup) and the /etc/hosts file to make sure the IP address for the SCAN names are registered correctly.
这个报错可以忽略
Error in invoking target 'agent nmhs' of makefile '/oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk'. See '/oracle/app/oraInventory/logs/installActions2023-08-27_09-02-21PM.log' for details.
这是 Oracle 11g 在 Linux 7(CentOS/RHEL 7)系统上的已知问题,可以通过在 makefile 中为链接命令追加一个参数(-lnnz11)来规避这个错误。
根据提示找见这个文件:/oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk
为防止后期再次使用这个文件,先做个备份
cp /oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk /oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk.bak
#如下176行加上 -lnnz11
vim /oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk
171 #===========================
172 # emdctl
173 #===========================
174
175 $(SYSMANBIN)emdctl:
176 $(MK_EMAGENT_NMECTL) -lnnz11
修改好之后,点Retry
[root@testosa ~]# /oracle/app/oracle/product/11.2.0/db_1/root.sh Performing root user operation for Oracle 11g The following environment variables are set as: ORACLE_OWNER= oracle ORACLE_HOME= /oracle/app/oracle/product/11.2.0/db_1 Enter the full pathname of the local bin directory: [/usr/local/bin]: The contents of "dbhome" have not changed. No need to overwrite. The contents of "oraenv" have not changed. No need to overwrite. The contents of "coraenv" have not changed. No need to overwrite. Entries will be added to the /etc/oratab file as needed by Database Configuration Assistant when a database is created Finished running generic part of root script. Now product-specific root actions will be performed. Finished product-specific root actions. [root@testosa ~]# [root@testosb ~]# /oracle/app/oracle/product/11.2.0/db_1/root.sh Performing root user operation for Oracle 11g The following environment variables are set as: ORACLE_OWNER= oracle ORACLE_HOME= /oracle/app/oracle/product/11.2.0/db_1 Enter the full pathname of the local bin directory: [/usr/local/bin]: The contents of "dbhome" have not changed. No need to overwrite. The contents of "oraenv" have not changed. No need to overwrite. The contents of "coraenv" have not changed. No need to overwrite. Entries will be added to the /etc/oratab file as needed by Database Configuration Assistant when a database is created Finished running generic part of root script. Now product-specific root actions will be performed. Finished product-specific root actions. [root@testosb ~]# [root@testosc ~]# /oracle/app/oracle/product/11.2.0/db_1/root.sh Performing root user operation for Oracle 11g The following environment variables are set as: ORACLE_OWNER= oracle ORACLE_HOME= /oracle/app/oracle/product/11.2.0/db_1 Enter the full pathname of the local bin directory: [/usr/local/bin]: The contents of "dbhome" have not changed. No need to overwrite. The contents of "oraenv" have not changed. No need to overwrite. 
The contents of "coraenv" have not changed. No need to overwrite. Entries will be added to the /etc/oratab file as needed by Database Configuration Assistant when a database is created Finished running generic part of root script. Now product-specific root actions will be performed. Finished product-specific root actions. [root@testosc ~]#
使用dbca命令调出图形化界面
如上,输入安装 grid 时创建的 ASM 磁盘组管理员账号的密码:oracle
这里设置的vault的密码为:VaultPas1#
如上图所示:当前三个节点,每个节点一个线程,每个节点两个redo日志文件
生产环境 redo 文件至少 200MB 一个,每个节点(线程)至少 5 个
建库的时候我们选择 pool(Server Pool)的方式来管理实例,所以这里自动生成的实例名字是 db 名加下划线数字的组合
[grid@testosa:/home/grid]$lsnrctl status LSNRCTL for Linux: Version 11.2.0.4.0 - Production on 27-AUG-2023 22:55:55 Copyright (c) 1991, 2013, Oracle. All rights reserved. Connecting to (DESCRIPTION=(ADDRESS=(PROTOCOL=IPC)(KEY=LISTENER))) STATUS of the LISTENER ------------------------ Alias LISTENER Version TNSLSNR for Linux: Version 11.2.0.4.0 - Production Start Date 27-AUG-2023 19:31:29 Uptime 0 days 3 hr. 24 min. 25 sec Trace Level off Security ON: Local OS Authentication SNMP OFF Listener Parameter File /oracle/app/11.2.0/grid/network/admin/listener.ora Listener Log File /oracle/app/grid/diag/tnslsnr/testosa/listener/alert/log.xml Listening Endpoints Summary... (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=LISTENER))) (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.1.81)(PORT=1521))) (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.1.84)(PORT=1521))) Services Summary... Service "+ASM" has 1 instance(s). Instance "+ASM1", status READY, has 1 handler(s) for this service... Service "rac_db" has 1 instance(s). Instance "racdb_1", status READY, has 1 handler(s) for this service... Service "racdbXDB" has 1 instance(s). Instance "racdb_1", status READY, has 1 handler(s) for this service... The command completed successfully [grid@testosa:/home/grid]$ [oracle@testosa:/home/oracle]$lsnrctl status LSNRCTL for Linux: Version 11.2.0.4.0 - Production on 27-AUG-2023 22:56:41 Copyright (c) 1991, 2013, Oracle. All rights reserved. Connecting to (ADDRESS=(PROTOCOL=tcp)(HOST=)(PORT=1521)) STATUS of the LISTENER ------------------------ Alias LISTENER Version TNSLSNR for Linux: Version 11.2.0.4.0 - Production Start Date 27-AUG-2023 19:31:29 Uptime 0 days 3 hr. 25 min. 11 sec Trace Level off Security ON: Local OS Authentication SNMP OFF Listener Parameter File /oracle/app/11.2.0/grid/network/admin/listener.ora Listener Log File /oracle/app/grid/diag/tnslsnr/testosa/listener/alert/log.xml Listening Endpoints Summary... 
(DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=LISTENER))) (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.1.81)(PORT=1521))) (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.1.84)(PORT=1521))) Services Summary... Service "+ASM" has 1 instance(s). Instance "+ASM1", status READY, has 1 handler(s) for this service... Service "rac_db" has 1 instance(s). Instance "racdb_1", status READY, has 1 handler(s) for this service... Service "racdbXDB" has 1 instance(s). Instance "racdb_1", status READY, has 1 handler(s) for this service... The command completed successfully [oracle@testosa:/home/oracle]$
集群资源检查
[root@testosa ~]# crsctl status res -t -------------------------------------------------------------------------------- NAME TARGET STATE SERVER STATE_DETAILS -------------------------------------------------------------------------------- Local Resources -------------------------------------------------------------------------------- ora.DATA.dg ONLINE ONLINE testosa ONLINE ONLINE testosb ONLINE ONLINE testosc ora.GRID.dg ONLINE ONLINE testosa ONLINE ONLINE testosb ONLINE ONLINE testosc ora.LISTENER.lsnr ONLINE ONLINE testosa ONLINE ONLINE testosb ONLINE ONLINE testosc ora.RECOVERY.dg ONLINE ONLINE testosa ONLINE ONLINE testosb ONLINE ONLINE testosc ora.asm ONLINE ONLINE testosa Started ONLINE ONLINE testosb Started ONLINE ONLINE testosc Started ora.gsd OFFLINE OFFLINE testosa OFFLINE OFFLINE testosb OFFLINE OFFLINE testosc ora.net1.network ONLINE ONLINE testosa ONLINE ONLINE testosb ONLINE ONLINE testosc ora.ons ONLINE ONLINE testosa ONLINE ONLINE testosb ONLINE ONLINE testosc -------------------------------------------------------------------------------- Cluster Resources -------------------------------------------------------------------------------- ora.LISTENER_SCAN1.lsnr 1 ONLINE ONLINE testosa ora.cvu 1 ONLINE ONLINE testosa ora.oc4j 1 ONLINE ONLINE testosa ora.rac_db.db 1 ONLINE ONLINE testosa Open 2 ONLINE ONLINE testosb Open 3 ONLINE ONLINE testosc Open ora.scan1.vip 1 ONLINE ONLINE testosa ora.testosa.vip 1 ONLINE ONLINE testosa ora.testosb.vip 1 ONLINE ONLINE testosb ora.testosc.vip 1 ONLINE ONLINE testosc [root@testosa ~]#
https://blog.csdn.net/mengxiangfeiyang/article/details/129034747
http://www.fgedu.net.cn/7427.html
https://blog.51cto.com/u_12991611/5720816
https://blog.csdn.net/krusher2016/article/details/104462844/
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。