
Creating CentOS 7.6 VMs in VMware Workstation and Installing a Three-Node Oracle 11gR2 RAC (ASM Disks Managed with ASMLib)

CentOS 7.6 + Oracle 11.2 + ASMLib

Project Plan

OS          Specs  Hostname  Public IP     Private IP    VIP           SCAN IP
CentOS 7.6  2C/4G  testosa   192.168.1.81  10.10.100.81  192.168.1.84  192.168.1.80
CentOS 7.6  2C/4G  testosb   192.168.1.82  10.10.100.82  192.168.1.85  192.168.1.80
CentOS 7.6  2C/4G  testosc   192.168.1.83  10.10.100.83  192.168.1.86  192.168.1.80

Linux Server OS Installation

Using VMware Workstation

Note: do not enable DHCP on the VMware Workstation virtual NICs; we configure the second NIC's IP address manually.

Network configuration:

(Screenshots: VMware Workstation virtual network settings and the VM's two network adapters)

Network configuration

[root@testosa ~]# ip a|grep ens
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    inet 192.168.1.81/24 brd 192.168.1.255 scope global noprefixroute ens33
3: ens35: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    inet 10.10.100.81/24 brd 10.10.100.255 scope global noprefixroute ens35
[root@testosa ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.1.81"
PREFIX="24"
GATEWAY="192.168.1.1"
IPV6_PRIVACY="no"
[root@testosa ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens35
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens35"
DEVICE="ens35"
ONBOOT="yes"
IPADDR="10.10.100.81"
PREFIX="24"
GATEWAY="10.10.100.1"
IPV6_PRIVACY="no"
[root@testosa ~]#     

Install one host completely, then make two full clones of it to create the other hosts.

Reference: https://blog.csdn.net/myneth/article/details/132528842

Linux Server Configuration

Configure the hosts file

cat >> /etc/hosts << EOF
192.168.1.81 testosa
192.168.1.82 testosb
192.168.1.83 testosc

10.10.100.81 aprv
10.10.100.82 bprv
10.10.100.83 cprv

192.168.1.84  avip
192.168.1.85  bvip
192.168.1.86  cvip

192.168.1.80  scanip
EOF
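A quick sanity check of the naming, run as root on any node, is to resolve and ping each public and private host name once; a minimal sketch (the VIPs and the SCAN IP are left out on purpose, since they only answer after the clusterware is up):

for h in testosa testosb testosc aprv bprv cprv; do
    ping -c 1 -W 1 "$h" > /dev/null 2>&1 && echo "$h OK" || echo "$h FAILED"
done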

Configure the locale

echo "export LANG=en_US" >>  ~/.bash_profile
source ~/.bash_profile

Create Users, Groups, Directories, and the File System

/usr/sbin/groupadd -g 50001 oinstall
/usr/sbin/groupadd -g 50002 dba
/usr/sbin/groupadd -g 50003 oper
/usr/sbin/groupadd -g 50004 asmadmin
/usr/sbin/groupadd -g 50005 asmoper
/usr/sbin/groupadd -g 50006 asmdba
/usr/sbin/useradd -u 60001 -g oinstall -G dba,asmdba,oper oracle
/usr/sbin/useradd -u 60002 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid


echo grid | passwd --stdin grid
echo oracle | passwd --stdin oracle


# Create the file system
pvcreate /dev/sdb
vgcreate oravg /dev/sdb
lvcreate -n oralv -L 190G oravg
mkfs.ext4 /dev/oravg/oralv

pvs
vgs   # or vgdisplay
lvs   # or lvdisplay

mkdir -p /oracle

cat >> /etc/fstab << "EOF"
/dev/oravg/oralv /oracle ext4 defaults 0 0
EOF

mount -a



mkdir -p /oracle/app/grid
mkdir -p /oracle/app/11.2.0/grid
chown -R grid:oinstall /oracle

mkdir -p /oracle/app/oraInventory
chown -R grid:oinstall /oracle/app/oraInventory

mkdir -p /oracle/app/oracle
chown -R oracle:oinstall /oracle/app/oracle
chmod -R 775 /oracle
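Before moving on, it is worth confirming what was just created; a small verification sketch (the expected UIDs/GIDs and group memberships follow from the groupadd/useradd commands above):

id grid        # uid=60002, gid=50001(oinstall), groups include asmadmin,asmdba,asmoper
id oracle      # uid=60001, gid=50001(oinstall), groups include dba,asmdba,oper
df -h /oracle  # the oravg-oralv logical volume should be mounted here
ls -ld /oracle/app/grid /oracle/app/oracle /oracle/app/oraInventory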

Configure the yum Environment and Install Packages

# Configure a local yum repository
mount /dev/cdrom /mnt
cd /etc/yum.repos.d
mkdir bk
mv *.repo bk/


cat > /etc/yum.repos.d/Centos7.repo << "EOF"
[local]
name=Centos7
baseurl=file:///mnt
gpgcheck=0
enabled=1
EOF

cat /etc/yum.repos.d/Centos7.repo



# Install the required packages
yum -y install autoconf
yum -y install automake
yum -y install binutils
yum -y install binutils-devel
yum -y install bison
yum -y install cpp
yum -y install dos2unix
yum -y install gcc
yum -y install gcc-c++
yum -y install lrzsz
yum -y install python-devel
yum -y install compat-db*
yum -y install compat-gcc-34
yum -y install compat-gcc-34-c++
yum -y install compat-libcap1
yum -y install compat-libstdc++-33
yum -y install compat-libstdc++-33.i686
yum -y install glibc-*
yum -y install glibc-*.i686
yum -y install libXpm-*.i686
yum -y install libXp.so.6
yum -y install libXt.so.6
yum -y install libXtst.so.6
yum -y install libXext
yum -y install libXext.i686
yum -y install libXtst
yum -y install libXtst.i686
yum -y install libX11
yum -y install libX11.i686
yum -y install libXau
yum -y install libXau.i686
yum -y install libxcb
yum -y install libxcb.i686
yum -y install libXi
yum -y install libXi.i686
yum -y install libXtst
yum -y install libstdc++-docs
yum -y install libgcc_s.so.1
yum -y install libstdc++.i686
yum -y install libstdc++-devel
yum -y install libstdc++-devel.i686
yum -y install libaio
yum -y install libaio.i686
yum -y install libaio-devel
yum -y install libaio-devel.i686
yum -y install libXp
yum -y install libaio-devel
yum -y install numactl
yum -y install numactl-devel
yum -y install make
yum -y install sysstat
yum -y install unixODBC
yum -y install unixODBC-devel
yum -y install elfutils-libelf-devel-0.97
yum -y install elfutils-libelf-devel
yum -y install redhat-lsb-core
yum -y install unzip
yum -y install *vnc*

# Install the Linux graphical desktop
yum groupinstall -y "X Window System"
yum groupinstall -y "GNOME Desktop" "Graphical Administration Tools" 


# Check that the required packages are installed (pass the package names to query), e.g.:
rpm -q --qf '%{NAME}-%{VERSION}-%{RELEASE} (%{ARCH})\n' binutils compat-libcap1 glibc libaio libaio-devel sysstat

Modify System Parameters

Modify the resource limit parameters
vi /etc/security/limits.conf

#ORACLE SETTING
grid    soft    nproc    16384
grid    hard    nproc    16384
grid    soft    nofile   65536
grid    hard    nofile   65536
grid    soft    stack    32768
grid    hard    stack    32768
oracle  soft    nproc    16384
oracle  hard    nproc    16384
oracle  soft    nofile   65536
oracle  hard    nofile   65536
oracle  soft    stack    32768
oracle  hard    stack    32768
oracle  soft    memlock  2000000
oracle  hard    memlock  2000000


ulimit -a


# nproc: the OS limit on the number of processes a user may create
# nofile: file descriptors, i.e. how many files a single process may have open at once
# memlock: locked memory, the maximum memory the oracle user may lock, in KB
Physical memory in this environment is 4 GB (1 GB for grid, 1 GB for the OS, leaving 2 GB for oracle); memlock must stay below physical memory.
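To confirm that the new limits are picked up by fresh login sessions, query them as each user; a quick check sketch (pam_limits must be active for the login path used):

# -u = nproc, -n = nofile, -s = stack (KB), -l = memlock (KB)
su - grid   -c 'ulimit -u -n -s -l'
su - oracle -c 'ulimit -u -n -s -l'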
Modify the nproc parameter
echo "* - nproc 16384" > /etc/security/limits.d/90-nproc.conf
Control the resources allocated to users
echo "session    required     pam_limits.so" >> /etc/pam.d/login
cat /etc/pam.d/login
Modify kernel parameters
vi /etc/sysctl.conf

#ORACLE SETTING
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
kernel.panic_on_oops = 1
vm.nr_hugepages = 868
kernel.shmmax = 1610612736
kernel.shmall = 393216
kernel.shmmni = 4096

sysctl -p

Parameter Notes

--kernel.panic_on_oops = 1
Whether the kernel should panic (and restart) when a program oops occurs
--vm.nr_hugepages = 868
Huge pages; a must when physical memory exceeds 8 GB
Rule of thumb: sga_max_size/2M + (100~500) = 1536/2M + 100 = 868
The huge page pool must be large enough to cover sga_max_size

--kernel.shmmax = 1610612736
Maximum size of a single shared memory segment; it must hold the entire SGA (> SGA)
SGA + PGA < 80% of physical memory
SGA_max < 80% of (80% of physical memory)
PGA_max < 20% of (80% of physical memory)

kernel.shmall = 393216
--Total number of shared memory pages = kernel.shmmax / PAGESIZE
getconf PAGESIZE    --returns the memory page size, 4096 here

kernel.shmmni = 4096
--Number of shared memory segments; each instance uses one segment
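The shmall arithmetic can be re-derived directly on the host; a small sketch (assumes the 4096-byte page size mentioned above):

PAGESIZE=$(getconf PAGESIZE)    # 4096 on x86_64
SHMMAX=1610612736               # kernel.shmmax from sysctl.conf
echo $(( SHMMAX / PAGESIZE ))   # prints 393216 = kernel.shmall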
Disable transparent huge pages
cat /proc/meminfo 

cat /sys/kernel/mm/transparent_hugepage/defrag
[always] madvise never

cat /sys/kernel/mm/transparent_hugepage/enabled
[always] madvise never

vi /etc/rc.d/rc.local

if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
echo never > /sys/kernel/mm/transparent_hugepage/defrag
fi

chmod +x /etc/rc.d/rc.local
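After a reboot (or after running the two echo commands by hand), the active value, shown in brackets, should be never for both files; a quick check:

cat /sys/kernel/mm/transparent_hugepage/enabled   # expect: always madvise [never]
cat /sys/kernel/mm/transparent_hugepage/defrag    # expect: always madvise [never]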
Disable NUMA
numactl --hardware

vim /etc/default/grub
GRUB_CMDLINE_LINUX="crashkernel=auto rhgb quiet numa=off"
grub2-mkconfig -o /boot/grub2/grub.cfg

#vi /boot/grub/grub.conf
#kernel /boot/vmlinuz-2.6.18-128.1.16.0.1.el5 root=LABEL=DBSYS ro bootarea=dbsys rhgb quiet console=ttyS0,115200n8 console=tty1 crashkernel=128M@16M numa=off
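Once grub.cfg has been regenerated and the host rebooted, the flag should be visible on the kernel command line and numactl should report a single node; a verification sketch:

grep -o 'numa=off' /proc/cmdline   # should print numa=off
numactl --hardware                 # expect: available: 1 nodes (0)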
Boot the OS into text (multi-user) mode
systemctl set-default multi-user.target
Shared memory (/dev/shm)
[root@testosa ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda2        93G  1.9G   91G   2% /
devtmpfs        1.9G     0  1.9G   0% /dev
tmpfs           1.9G     0  1.9G   0% /dev/shm

# /dev/shm defaults to half of physical memory; we make it a bit larger

echo "tmpfs                   /dev/shm                tmpfs   defaults,size=3072m        0 0" >>/etc/fstab
mount -o remount /dev/shm

[root@testosa ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/sda2                 93G  2.3G   91G   3% /
devtmpfs                 1.9G     0  1.9G   0% /dev
tmpfs                    3.0G     0  3.0G   0% /dev/shm

Security Configuration

# 1. Disable SELinux
echo "SELINUX=disabled" > /etc/selinux/config
echo "#SELINUXTYPE=targeted " >> /etc/selinux/config
setenforce 0


# 2. Stop the firewall
systemctl stop firewalld
systemctl disable firewalld

Disable NTP

systemctl stop ntpd
systemctl disable ntpd

# The clocks on the three hosts must match
date -s 'Sat Aug 26 23:18:15 CST 2023'
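If root can already reach the other hosts over SSH (an assumption; this guide only sets up SSH trust for grid and oracle later), the clocks can be compared in one pass:

for h in testosa testosb testosc; do
    echo -n "$h: "; ssh "$h" date
done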

Configure the grid/oracle Environment Variables

su - grid

# Node 1:
cat >> ~/.bash_profile << "EOF"
PS1="[`whoami`@`hostname`:"'$PWD]$'
export PS1
umask 022
#alias sqlplus="rlwrap sqlplus"
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
ORACLE_SID=+ASM1; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
ORACLE_BASE=/oracle/app/grid; export ORACLE_BASE
ORACLE_HOME=/oracle/app/11.2.0/grid; export ORACLE_HOME
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
PATH=.:$PATH:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
        if [ $SHELL = "/bin/ksh" ]; then
            ulimit -p 16384
              ulimit -n 65536
  else
   ulimit -u 16384 -n 65536
      fi
    umask 022
fi
EOF

# Node 2:
cat >> ~/.bash_profile << "EOF"
PS1="[`whoami`@`hostname`:"'$PWD]$'
export PS1
umask 022
#alias sqlplus="rlwrap sqlplus"
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
ORACLE_SID=+ASM2; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
ORACLE_BASE=/oracle/app/grid; export ORACLE_BASE
ORACLE_HOME=/oracle/app/11.2.0/grid; export ORACLE_HOME
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
PATH=.:$PATH:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
        if [ $SHELL = "/bin/ksh" ]; then
            ulimit -p 16384
              ulimit -n 65536
  else
   ulimit -u 16384 -n 65536
      fi
    umask 022
fi
EOF

# Node 3:
cat >> ~/.bash_profile << "EOF"
PS1="[`whoami`@`hostname`:"'$PWD]$'
export PS1
umask 022
#alias sqlplus="rlwrap sqlplus"
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
ORACLE_SID=+ASM3; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
ORACLE_BASE=/oracle/app/grid; export ORACLE_BASE
ORACLE_HOME=/oracle/app/11.2.0/grid; export ORACLE_HOME
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
PATH=.:$PATH:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
        if [ $SHELL = "/bin/ksh" ]; then
            ulimit -p 16384
              ulimit -n 65536
  else
   ulimit -u 16384 -n 65536
      fi
    umask 022
fi
EOF


su - oracle

# Node 1:
cat >> ~/.bash_profile << "EOF"
PS1="[`whoami`@`hostname`:"'$PWD]$'
#alias sqlplus="rlwrap sqlplus"
#alias rman="rlwrap rman"
export PS1
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
export ORACLE_UNQNAME=rac_db
ORACLE_BASE=/oracle/app/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1; export ORACLE_HOME
ORACLE_SID=rac_db1; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
NLS_LANG=AMERICAN_AMERICA.ZHS16GBK;export NLS_LANG
PATH=.:$PATH:$HOME/bin:$ORACLE_BASE/product/11.2.0/db_1/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
        if [ $SHELL = "/bin/ksh" ]; then
            ulimit -p 16384
              ulimit -n 65536
  else
   ulimit -u 16384 -n 65536
      fi
    umask 022
fi
EOF


# Node 2:
cat >> ~/.bash_profile << "EOF"
PS1="[`whoami`@`hostname`:"'$PWD]$'
#alias sqlplus="rlwrap sqlplus"
#alias rman="rlwrap rman"
export PS1
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
export ORACLE_UNQNAME=rac_db
ORACLE_BASE=/oracle/app/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1; export ORACLE_HOME
ORACLE_SID=rac_db2; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
NLS_LANG=AMERICAN_AMERICA.ZHS16GBK;export NLS_LANG
PATH=.:$PATH:$HOME/bin:$ORACLE_BASE/product/11.2.0/db_1/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
        if [ $SHELL = "/bin/ksh" ]; then
            ulimit -p 16384
              ulimit -n 65536
  else
   ulimit -u 16384 -n 65536
      fi
    umask 022
fi
EOF

# Node 3:
cat >> ~/.bash_profile << "EOF"
PS1="[`whoami`@`hostname`:"'$PWD]$'
#alias sqlplus="rlwrap sqlplus"
#alias rman="rlwrap rman"
export PS1
export TMP=/tmp
export LANG=en_US
export TMPDIR=$TMP
export ORACLE_UNQNAME=rac_db
ORACLE_BASE=/oracle/app/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1; export ORACLE_HOME
ORACLE_SID=rac_db3; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
NLS_LANG=AMERICAN_AMERICA.ZHS16GBK;export NLS_LANG
PATH=.:$PATH:$HOME/bin:$ORACLE_BASE/product/11.2.0/db_1/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
        if [ $SHELL = "/bin/ksh" ]; then
            ulimit -p 16384
              ulimit -n 65536
  else
   ulimit -u 16384 -n 65536
      fi
    umask 022
fi
EOF

Configure SSH Trust (Passwordless SSH)

# Download the script
wget https://gitcode.net/myneth/tools/-/raw/master/tool/ssh.sh
chmod +x ssh.sh

# Set up mutual trust
./ssh.sh -user grid -hosts "testosa testosb testosc" -advanced -exverify -confirm
./ssh.sh -user oracle -hosts "testosa testosb testosc" -advanced -exverify -confirm


chmod 600 /home/grid/.ssh/config
chmod 600 /home/oracle/.ssh/config


# Verify the trust
su - grid
for i in testos{a,b,c};do
ssh $i hostname 
done

su - oracle
for i in testos{a,b,c};do
ssh $i hostname 
done

RAC Shared Storage Planning and Configuration

Shared disk plan

grid      3 × 1 GB
recovery  2 × 5 GB
data      1 × 10 GB

Create Shared Disks in VMware Workstation

On the machine running VMware, open cmd with administrator privileges, change to the directory where the shared disks will be stored (here d:\vm\sharedisk), and create the shared disks:

C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 1GB -a lsilogic -t 4 shared-asm01.vmdk
C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 1GB -a lsilogic -t 4 shared-asm02.vmdk
C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 1GB -a lsilogic -t 4 shared-asm03.vmdk
C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 5GB -a lsilogic -t 4 shared-asm04.vmdk
C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 5GB -a lsilogic -t 4 shared-asm05.vmdk
C:\"Program Files (x86)"\VMware\"VMware Workstation"\vmware-vdiskmanager -c -s 10GB -a lsilogic -t 4 shared-asm06.vmdk

# dir /b
shared-asm01-flat.vmdk
shared-asm01.vmdk
shared-asm02-flat.vmdk
shared-asm02.vmdk
shared-asm03-flat.vmdk
shared-asm03.vmdk
shared-asm04-flat.vmdk
shared-asm04.vmdk
shared-asm05-flat.vmdk
shared-asm05.vmdk
shared-asm06-flat.vmdk
shared-asm06.vmdk

Attach the Shared Storage (on every host)

(Screenshots: adding the existing shared .vmdk disks to each virtual machine)

Check the virtual machine configuration files

testosa.vmx
testosb.vmx
testosc.vmx

# Make sure each configuration file contains the following
disk.locking = "FALSE"
disk.EnableUUID = "TRUE"
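For reference, the shared disks themselves are normally attached on a second SCSI controller whose bus is marked shared. The entries below are an illustrative sketch of what each .vmx ends up containing, not an exact copy of this lab's files; the controller number and Windows path must match your own layout, with one scsi1:N block per shared .vmdk:

scsi1.present = "TRUE"
scsi1.virtualDev = "lsilogic"
scsi1.sharedBus = "virtual"
scsi1:0.present = "TRUE"
scsi1:0.fileName = "D:\vm\sharedisk\shared-asm01.vmdk"
scsi1:0.deviceType = "disk"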

Boot each server and check the disks

[root@testosa ~]# fdisk -l|grep 'Disk /dev/s'|sort
Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors
Disk /dev/sdb: 214.7 GB, 214748364800 bytes, 419430400 sectors
Disk /dev/sdc: 1073 MB, 1073741824 bytes, 2097152 sectors
Disk /dev/sdd: 1073 MB, 1073741824 bytes, 2097152 sectors
Disk /dev/sde: 1073 MB, 1073741824 bytes, 2097152 sectors
Disk /dev/sdf: 5368 MB, 5368709120 bytes, 10485760 sectors
Disk /dev/sdg: 5368 MB, 5368709120 bytes, 10485760 sectors
Disk /dev/sdh: 10.7 GB, 10737418240 bytes, 20971520 sectors
[root@testosa ~]#


[root@testosb ~]# fdisk -l|grep 'Disk /dev/s'|sort
Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors
Disk /dev/sdb: 214.7 GB, 214748364800 bytes, 419430400 sectors
Disk /dev/sdc: 1073 MB, 1073741824 bytes, 2097152 sectors
Disk /dev/sdd: 1073 MB, 1073741824 bytes, 2097152 sectors
Disk /dev/sde: 1073 MB, 1073741824 bytes, 2097152 sectors
Disk /dev/sdf: 5368 MB, 5368709120 bytes, 10485760 sectors
Disk /dev/sdg: 5368 MB, 5368709120 bytes, 10485760 sectors
Disk /dev/sdh: 10.7 GB, 10737418240 bytes, 20971520 sectors
[root@testosb ~]# 



[root@testosc ~]# fdisk -l|grep 'Disk /dev/s'|sort
Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors
Disk /dev/sdb: 214.7 GB, 214748364800 bytes, 419430400 sectors
Disk /dev/sdc: 1073 MB, 1073741824 bytes, 2097152 sectors
Disk /dev/sdd: 1073 MB, 1073741824 bytes, 2097152 sectors
Disk /dev/sde: 1073 MB, 1073741824 bytes, 2097152 sectors
Disk /dev/sdf: 5368 MB, 5368709120 bytes, 10485760 sectors
Disk /dev/sdg: 5368 MB, 5368709120 bytes, 10485760 sectors
Disk /dev/sdh: 10.7 GB, 10737418240 bytes, 20971520 sectors
[root@testosc ~]# 

ASM Shared Disk Configuration

Disk access options

  • raw (raw devices)

    Inconvenient to use.

  • asmlib

    Released by Oracle to address the inconvenience of raw devices.

  • udev

    Dynamic device management (a rules-file sketch is shown after this list).

This walkthrough uses asmlib.
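For comparison, the udev approach mentioned above pins device names and ownership with a rules file keyed on each disk's SCSI identifier. A minimal sketch, not used in this walkthrough; the RESULT value is a placeholder that must be replaced with the actual scsi_id output for each disk:

# /etc/udev/rules.d/99-oracle-asmdevices.rules (illustrative only)
KERNEL=="sd?1", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="<scsi_id-of-the-disk>", SYMLINK+="asmdisks/asm-grid01", OWNER="grid", GROUP="asmadmin", MODE="0660"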

Download the tool packages

cd /etc/yum.repos.d
wget https://public-yum.oracle.com/public-yum-ol7.repo
wget https://public-yum.oracle.com/repo/OracleLinux/OL7/latest/x86_64/getPackage/oracleasm-support-2.1.11-2.el7.x86_64.rpm
wget https://download.oracle.com/otn_software/asmlib/oracleasmlib-2.0.12-1.el7.x86_64.rpm

Upload the asmlib installation packages

oracleasmlib-2.0.12-1.el7.x86_64.rpm
oracleasm-support-2.1.11-2.el7.x86_64.rpm

Install

yum install -y kmod-oracleasm
rpm -ivh oracleasm-support-2.1.11-2.el7.x86_64.rpm
rpm -ivh oracleasmlib-2.0.12-1.el7.x86_64.rpm

Configure the asmlib driver

# Load the oracleasm module
[root@testosa ~]# oracleasm init
Creating /dev/oracleasm mount point: /dev/oracleasm
Loading module "oracleasm": oracleasm
Configuring "oracleasm" to use device physical block size
Mounting ASMlib driver filesystem: /dev/oracleasm
[root@testosa ~]# 

# Configure the oracleasm driver
[root@testosa ~]# oracleasm configure -i
Configuring the Oracle ASM library driver.

This will configure the on-boot properties of the Oracle ASM library
driver.  The following questions will determine whether the driver is
loaded on boot and what permissions it will have.  The current values
will be shown in brackets ('[]').  Hitting <ENTER> without typing an
answer will keep that current value.  Ctrl-C will abort.

Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
[root@testosa ~]# 

Partition the disks

# One partition per disk is enough
fdisk /dev/sdc
fdisk /dev/sdd
fdisk /dev/sde
fdisk /dev/sdf
fdisk /dev/sdg
fdisk /dev/sdh

# List the new partitions (sdc1–sdh1 are the ones just created)
[root@testosa ~]# ls -lsa /dev/sd*1
0 brw-rw---- 1 root disk 8,   1 Aug 27 11:54 /dev/sda1
0 brw-rw---- 1 root disk 8,  33 Aug 27 14:26 /dev/sdc1
0 brw-rw---- 1 root disk 8,  49 Aug 27 14:28 /dev/sdd1
0 brw-rw---- 1 root disk 8,  65 Aug 27 14:28 /dev/sde1
0 brw-rw---- 1 root disk 8,  81 Aug 27 14:29 /dev/sdf1
0 brw-rw---- 1 root disk 8,  97 Aug 27 14:29 /dev/sdg1
0 brw-rw---- 1 root disk 8, 113 Aug 27 14:29 /dev/sdh1

# If a partition was created but does not show up, rescan with:
kpartx -a /dev/sdc
OR:
partprobe /dev/sdc
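Since fdisk is interactive, the same single-partition layout can be scripted across all six disks by piping in the keystrokes; a sketch that assumes the disks are blank (n = new partition, p = primary, partition 1, default first/last sector, w = write):

for d in /dev/sd{c,d,e,f,g,h}; do
    printf 'n\np\n1\n\n\nw\n' | fdisk "$d"
done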

Create the ASM disks with asmlib

oracleasm createdisk grid01 /dev/sdc1
oracleasm createdisk grid02 /dev/sdd1
oracleasm createdisk grid03 /dev/sde1
oracleasm createdisk recovery01 /dev/sdf1
oracleasm createdisk recovery02 /dev/sdg1
oracleasm createdisk data01 /dev/sdh1


[root@testosa ~]# oracleasm createdisk grid01 /dev/sdc1
Writing disk header: done
Instantiating disk: done
[root@testosa ~]# oracleasm createdisk grid02 /dev/sdd1
Writing disk header: done
Instantiating disk: done
[root@testosa ~]# oracleasm createdisk grid03 /dev/sde1
Writing disk header: done
Instantiating disk: done
[root@testosa ~]# oracleasm createdisk recovery01 /dev/sdf1
Writing disk header: done
Instantiating disk: done
[root@testosa ~]# oracleasm createdisk recovery02 /dev/sdg1
Writing disk header: done
Instantiating disk: done
[root@testosa ~]# oracleasm createdisk data01 /dev/sdh1
Writing disk header: done
Instantiating disk: done
[root@testosa ~]# 


# Check the created disks (note the ownership/permissions)
[root@testosa disks]# pwd
/dev/oracleasm/disks
[root@testosa disks]# ll -lsa
total 0
0 drwxr-xr-x 1 root root          0 Aug 27 14:38 .
0 drwxr-xr-x 4 root root          0 Aug 27 14:38 ..
0 brw-rw---- 1 grid asmadmin 8, 113 Aug 27 14:40 DATA01
0 brw-rw---- 1 grid asmadmin 8,  33 Aug 27 14:40 GRID01
0 brw-rw---- 1 grid asmadmin 8,  49 Aug 27 14:40 GRID02
0 brw-rw---- 1 grid asmadmin 8,  65 Aug 27 14:40 GRID03
0 brw-rw---- 1 grid asmadmin 8,  81 Aug 27 14:40 RECOVERY01
0 brw-rw---- 1 grid asmadmin 8,  97 Aug 27 14:40 RECOVERY02

Edit the configuration file

vim /etc/sysconfig/oracleasm

[root@testosa ~]# cat /etc/sysconfig/oracleasm|sed '/^$/d'
#
# This is a configuration file for automatic loading of the Oracle
# Automatic Storage Management library kernel driver.  It is generated
# By running /etc/init.d/oracleasm configure.  Please use that method
# to modify this file
#
# ORACLEASM_ENABLED: 'true' means to load the driver on boot.
ORACLEASM_ENABLED=true
# ORACLEASM_UID: Default user owning the /dev/oracleasm mount point.
ORACLEASM_UID=grid
# ORACLEASM_GID: Default group owning the /dev/oracleasm mount point.
ORACLEASM_GID=asmadmin
# ORACLEASM_SCANBOOT: 'true' means scan for ASM disks on boot.
ORACLEASM_SCANBOOT=true
# ORACLEASM_SCANORDER: Matching patterns to order disk scanning
ORACLEASM_SCANORDER=""
# ORACLEASM_SCANEXCLUDE: Matching patterns to exclude disks from scan
ORACLEASM_SCANEXCLUDE="sda sdb"
# ORACLEASM_SCAN_DIRECTORIES: Scan disks under these directories
ORACLEASM_SCAN_DIRECTORIES=""
# ORACLEASM_USE_LOGICAL_BLOCK_SIZE: 'true' means use the logical block size
# reported by the underlying disk instead of the physical. The default
# is 'false'
ORACLEASM_USE_LOGICAL_BLOCK_SIZE=false
[root@testosa ~]# 


# Scan for disks
[root@testosa ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
[root@testosa ~]# 


# List the disks
[root@testosa ~]# oracleasm listdisks
DATA01
GRID01
GRID02
GRID03
RECOVERY01
RECOVERY02
[root@testosa ~]# 
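If it is ever unclear which block device backs a label (or the reverse), oracleasm querydisk resolves the mapping in both directions; a quick sketch:

oracleasm querydisk -d GRID01    # label -> device major,minor
oracleasm querydisk -p GRID01    # label -> matching /dev path(s)
oracleasm querydisk /dev/sdc1    # device -> label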

Steps on the Other Nodes

The partitioning and asmlib disk creation above only need to be done on one node.

# Power on the other nodes
# Check the partition information
[root@testosb ~]# ll /dev/sd??
brw-rw---- 1 root disk 8,   1 Aug 27 11:55 /dev/sda1
brw-rw---- 1 root disk 8,   2 Aug 27 11:55 /dev/sda2
brw-rw---- 1 root disk 8,  33 Aug 27 14:48 /dev/sdc1
brw-rw---- 1 root disk 8,  49 Aug 27 14:48 /dev/sdd1
brw-rw---- 1 root disk 8,  65 Aug 27 14:48 /dev/sde1
brw-rw---- 1 root disk 8,  81 Aug 27 14:48 /dev/sdf1
brw-rw---- 1 root disk 8,  97 Aug 27 14:48 /dev/sdg1
brw-rw---- 1 root disk 8, 113 Aug 27 14:48 /dev/sdh1

# Configure the asmlib driver
[root@testosb ~]# oracleasm configure -i
Configuring the Oracle ASM library driver.

This will configure the on-boot properties of the Oracle ASM library
driver.  The following questions will determine whether the driver is
loaded on boot and what permissions it will have.  The current values
will be shown in brackets ('[]').  Hitting <ENTER> without typing an
answer will keep that current value.  Ctrl-C will abort.

Default user to own the driver interface []: grid 
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done

# Edit the asmlib configuration file to exclude the system's own disks from scanning
[root@testosb ~]# cat /etc/sysconfig/oracleasm|sed -r '/^$|^#/d'
ORACLEASM_ENABLED=true
ORACLEASM_UID=grid
ORACLEASM_GID=asmadmin
ORACLEASM_SCANBOOT=true
ORACLEASM_SCANORDER=""
ORACLEASM_SCANEXCLUDE="sda sdb"
ORACLEASM_SCAN_DIRECTORIES=""
ORACLEASM_USE_LOGICAL_BLOCK_SIZE=false
[root@testosb ~]# 


# Scan for disks
oracleasm scandisks

# List the disks
oracleasm listdisks

# Check the disk info (note the ownership)
[root@testosb ~]# ll /dev/oracleasm/disks/
total 0
brw-rw---- 1 grid asmadmin 8, 113 Aug 27 14:48 DATA01
brw-rw---- 1 grid asmadmin 8,  33 Aug 27 14:48 GRID01
brw-rw---- 1 grid asmadmin 8,  49 Aug 27 14:48 GRID02
brw-rw---- 1 grid asmadmin 8,  65 Aug 27 14:48 GRID03
brw-rw---- 1 grid asmadmin 8,  81 Aug 27 14:48 RECOVERY01
brw-rw---- 1 grid asmadmin 8,  97 Aug 27 14:48 RECOVERY02
[root@testosb ~]# 

Grid Infrastructure Installation and ASM Disk Group Creation

Install the Grid software

# Software download
p13390677_112040_Linux-x86-64_3of7.zip
# Switch to the grid user
su - grid
# Unzip the software (as the grid user)
unzip p13390677_112040_Linux-x86-64_3of7.zip
# Switch to root and first install the disk-verification tool found in the unzipped directory (install it on the other nodes as well)
rpm -ivh grid/rpm/cvuqdisk-1.0.9-1.rpm

Start vncserver as the grid user

The password set here is: rootroot

[grid@testosa:/soft]$vncserver

You will require a password to access your desktops.

Password:
Password must be at least 6 characters - try again
Password:
Verify:
Would you like to enter a view-only password (y/n)? n  
A view-only password is not used
xauth:  file /home/grid/.Xauthority does not exist

New 'testosa:1 (grid)' desktop is testosa:1

Creating default startup script /home/grid/.vnc/xstartup
Creating default config /home/grid/.vnc/config
Starting applications specified in /home/grid/.vnc/xstartup
Log file is /home/grid/.vnc/testosa:1.log

Connect remotely with a VNC client to the vncserver session and run the graphical installer

(Screenshots: VNC desktop session with the installer staged)

# Note the -jreLoc option; without it, dialog boxes in the installer UI cannot be enlarged
./runInstaller -jreLoc /etc/alternatives/jre_1.8.0

(Screenshots: initial installer screens up to the SCAN configuration)

The SCAN name entered here must match the entry configured in /etc/hosts:

[root@testosa ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.81 testosa
192.168.1.82 testosb
192.168.1.83 testosc

10.10.100.81 aprv
10.10.100.82 bprv
10.10.100.83 cprv

192.168.1.84  avip
192.168.1.85  bvip
192.168.1.86  cvip

192.168.1.80  scanip


Check the SSH trust (on every node)

(Screenshots: SSH connectivity test and the following installer steps)

Set the passwords (oracle is used here)

(Screenshots: remaining installer configuration steps and the prerequisite check results)

Try the automatic fix


Follow the prompts as shown below

[root@testosa ~]# /tmp/CVU_11.2.0.4.0_grid/runfixup.sh
Response file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.response
Enable file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.enable
Log file location: /tmp/CVU_11.2.0.4.0_grid/orarun.log
Setting Kernel Parameters...
kernel.shmmax = 1610612736
kernel.shmmax = 1977098240
kernel.shmall = 393216
kernel.shmall = 2097152
Installing Package /tmp/CVU_11.2.0.4.0_grid//cvuqdisk-1.0.9-1.rpm
Preparing...                          ################################# [100%]
        package cvuqdisk-1.0.10-1.x86_64 (which is newer than cvuqdisk-1.0.9-1.x86_64) is already installed
[root@testosa ~]# 


[root@testosb ~]# /tmp/CVU_11.2.0.4.0_grid/runfixup.sh
Response file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.response
Enable file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.enable
Log file location: /tmp/CVU_11.2.0.4.0_grid/orarun.log
Setting Kernel Parameters...
kernel.shmmax = 1610612736
kernel.shmmax = 1977094144
kernel.shmall = 393216
kernel.shmall = 2097152
Installing Package /tmp/CVU_11.2.0.4.0_grid//cvuqdisk-1.0.9-1.rpm
Preparing...                          ################################# [100%]
        package cvuqdisk-1.0.10-1.x86_64 (which is newer than cvuqdisk-1.0.9-1.x86_64) is already installed
[root@testosb ~]# 

[root@testosc ~]# /tmp/CVU_11.2.0.4.0_grid/runfixup.sh
Response file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.response
Enable file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.enable
Log file location: /tmp/CVU_11.2.0.4.0_grid/orarun.log
Setting Kernel Parameters...
kernel.shmmax = 1610612736
kernel.shmmax = 1977094144
kernel.shmall = 393216
kernel.shmall = 2097152
Installing Package /tmp/CVU_11.2.0.4.0_grid//cvuqdisk-1.0.9-1.rpm
Preparing...                          ################################# [100%]
        package cvuqdisk-1.0.10-1.x86_64 (which is newer than cvuqdisk-1.0.9-1.x86_64) is already installed
[root@testosc ~]# 

After the fix scripts finish, come back and click OK to continue the prerequisite checks


Install the pdksh-5.2.14-37.el5.x86_64.rpm package (on every node; it conflicts with ksh, which must be removed first)

[root@testosa soft]# rpm -ivh pdksh-5.2.14-37.el5.x86_64.rpm
warning: pdksh-5.2.14-37.el5.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID e8562897: NOKEY
error: Failed dependencies:
        pdksh conflicts with (installed) ksh-20120801-139.el7.x86_64
[root@testosa soft]# rpm -evh ksh-20120801-139.el7.x86_64
Preparing...                          ################################# [100%]
Cleaning up / removing...
   1:ksh-20120801-139.el7             ################################# [100%]        
[root@testosa soft]# rpm -ivh pdksh-5.2.14-37.el5.x86_64.rpm
warning: pdksh-5.2.14-37.el5.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID e8562897: NOKEY
Preparing...                          ################################# [100%]
Updating / installing...
   1:pdksh-5.2.14-37.el5              ################################# [100%]


Device Checks for ASM

Device Checks for ASM - This is a pre-check to verify if the specified devices meet the requirements for configuration through the Oracle Universal Storage Manager Configuration Assistant.  Error: 
 - 
testosc:PRVF-7533 : Proper version of package "cvuqdisk" is not found on node "testosc" [Required = "1.0.9-1" ; Found = "1.0.10-1"].  - Cause: Cause Of Problem Not Available  - Action: User Action Not Available 
 - 
testosc:PRVF-7533 : Proper version of package "cvuqdisk" is not found on node "testosc" [Required = "1.0.9-1" ; Found = "1.0.10-1"].  - Cause: Cause Of Problem Not Available  - Action: User Action Not Available 
 - 
testosc:PRVF-7533 : Proper version of package "cvuqdisk" is not found on node "testosc" [Required = "1.0.9-1" ; Found = "1.0.10-1"].  - Cause: Cause Of Problem Not Available  - Action: User Action Not Available 

  Operation Failed on Nodes: [testosc,  testosb,  testosa]  
Verification result of failed node: testosc 

 Details: 
 - 
Unable to determine the shareability of device /dev/oracleasm/disks/GRID01 on nodes: testosa,testosb,testosc  - Cause: Cause Of Problem Not Available  - Action: User Action Not Available 
 - 
PRVF-9802 : Attempt to get udev info from node "testosc" failed  - Cause:  Attempt to read the udev permissions file failed, probably due to missing permissions directory, missing or invalid permissions file, or permissions file not accessible to use account running the check.  - Action:  Make sure that the udev permissions directory is created, the udev permissions file is available, and it has correct read permissions for access by the user running the check. 
Back to Top  
Verification result of failed node: testosb 

 Details: 
 - 
Unable to determine the shareability of device /dev/oracleasm/disks/GRID01 on nodes: testosa,testosb,testosc  - Cause: Cause Of Problem Not Available  - Action: User Action Not Available 
 - 
PRVF-9802 : Attempt to get udev info from node "testosb" failed  - Cause:  Attempt to read the udev permissions file failed, probably due to missing permissions directory, missing or invalid permissions file, or permissions file not accessible to use account running the check.  - Action:  Make sure that the udev permissions directory is created, the udev permissions file is available, and it has correct read permissions for access by the user running the check. 
Back to Top  
Verification result of failed node: testosa 

 Details: 
 - 
Unable to determine the shareability of device /dev/oracleasm/disks/GRID01 on nodes: testosa,testosb,testosc  - Cause: Cause Of Problem Not Available  - Action: User Action Not Available 
 - 
PRVF-9802 : Attempt to get udev info from node "testosa" failed  - Cause:  Attempt to read the udev permissions file failed, probably due to missing permissions directory, missing or invalid permissions file, or permissions file not accessible to use account running the check.  - Action:  Make sure that the udev permissions directory is created, the udev permissions file is available, and it has correct read permissions for access by the user running the check. 

Network Time Protocol (NTP)

Network Time Protocol (NTP) - This task verifies cluster time synchronization on clusters that use Network Time Protocol (NTP).  Error: 
 - 
PRVF-5507 : NTP daemon or service is not running on any node but NTP configuration file exists on the following node(s): testosa  - Cause:  The configuration file was found on at least one node though no NTP daemon or service was running.  - Action:  If you plan to use CTSS for time synchronization then NTP configuration must be uninstalled on all nodes of the cluster. 

  Check Failed on Nodes: [testosc,  testosb,  testosa]  
Verification result of failed node: testosc 

 Details: 
 - 
PRVF-5402 : Warning: Could not find NTP configuration file "/etc/ntp.conf" on node "testosc"  - Cause:  NTP might not have been configured on the node, or NTP might have been configured with a configuration file different from the one indicated.  - Action:  Configure NTP on the node if not done so yet. Refer to your NTP vendor documentation for details. 
Back to Top  
Verification result of failed node: testosb 

 Details: 
 - 
PRVF-5402 : Warning: Could not find NTP configuration file "/etc/ntp.conf" on node "testosb"  - Cause:  NTP might not have been configured on the node, or NTP might have been configured with a configuration file different from the one indicated.  - Action:  Configure NTP on the node if not done so yet. Refer to your NTP vendor documentation for details. 
Back to Top  
Verification result of failed node: testosa 

# As indicated above, delete /etc/ntp.conf on the primary node

(screenshot)

The error shown at this step can be ignored.


Error message

Remote 'AttachHome' failed on nodes: 'testosb,testosc'. Refer to '/oracle/app/oraInventory/logs/installActions2023-08-27_04-26-34PM.log' for details.
It is recommended that the following command needs to be manually run on the failed nodes: 
 /oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=<node on which command is to be run>. 
Please refer 'AttachHome' logs under central inventory of remote nodes where failure occurred for more details.

Check the log

INFO: Invoking OUI on cluster nodes testosb
INFO: /tmp/OraInstall2023-08-27_04-26-34PM/oui/bin/platform/linux64/runInstaller -jreLoc /etc/alternatives/jre_1.8.0 -paramFile /tmp/OraInstall2023-08-27_04-26-34PM/oui/clusterparam.ini  -silent -ignoreSysPrereqs -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosb -remoteInvocation -invokingNodeName testosa -logFilePath "/oracle/app/oraInventory/logs" -timestamp 2023-08-27_04-26-34PM
SEVERE: oracle.sysman.oii.oiip.oiipg.OiipgRemoteOpsException: Error occured while trying to run Unix command /tmp/OraInstall2023-08-27_04-26-34PM/oui/bin/platform/linux64/runInstaller -jreLoc /etc/alternatives/jre_1.8.0 -paramFile /tmp/OraInstall2023-08-27_04-26-34PM/oui/clusterparam.ini  -silent -ignoreSysPrereqs -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosb -remoteInvocation -invokingNodeName testosa -logFilePath "/oracle/app/oraInventory/logs" -timestamp 2023-08-27_04-26-34PM on nodes testosb. [Starting Oracle Universal Installer...Checking swap space: must be greater than 500 MB.   Actual 7628 MB    PassedThe Java RunTime Environment was not found at /etc/alternatives/jre_1.8.0/bin/java. Hence, the Oracle Universal Installer cannot be run.Please visit http://www.javasoft.com and install JRE version 1.3.1 or higher and try again.: No such file or directory :failed]
        at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmdOnUnix(OiipgClusterRunCmd.java:276)
        at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runAnyCmdOnNodes(OiipgClusterRunCmd.java:369)
        at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmd(OiipgClusterRunCmd.java:314)
        at oracle.sysman.oii.oiic.OiicBaseInventoryApp.runRemoteInvOpCmd(OiicBaseInventoryApp.java:281)
        at oracle.sysman.oii.oiic.OiicAttachHome.clsCmdAttachHome(OiicAttachHome.java:507)
        at oracle.sysman.oii.oiif.oiifw.OiifwClusterSaveInventoryWCCE.doOperation(OiifwClusterSaveInventoryWCCE.java:310)
        at oracle.sysman.oii.oiif.oiifb.OiifbCondIterator.iterate(OiifbCondIterator.java:171)
        at oracle.sysman.oii.oiif.oiifw.OiifwActionsPhaseWCDE.doOperation(OiifwActionsPhaseWCDE.java:641)
        at oracle.sysman.oii.oiif.oiifb.OiifbLinearIterator.iterate(OiifbLinearIterator.java:147)
        at oracle.sysman.oii.oiic.OiicInstallAPISession$OiicAPISelCompsInstall.doOperation(OiicInstallAPISession.java:1095)
        at oracle.sysman.oii.oiif.oiifb.OiifbCondIterator.iterate(OiifbCondIterator.java:171)
        at oracle.sysman.oii.oiic.OiicInstallAPISession.doInstallAction(OiicInstallAPISession.java:679)
        at oracle.sysman.oii.oiic.OiicInstallAPISession.access$000(OiicInstallAPISession.java:94)
        at oracle.sysman.oii.oiic.OiicInstallAPISession$OiicActionsThread.run(OiicInstallAPISession.java:971)
Caused by: oracle.ops.mgmt.cluster.ClusterException: Starting Oracle Universal Installer...Checking swap space: must be greater than 500 MB.   Actual 7628 MB    PassedThe Java RunTime Environment was not found at /etc/alternatives/jre_1.8.0/bin/java. Hence, the Oracle Universal Installer cannot be run.Please visit http://www.javasoft.com and install JRE version 1.3.1 or higher and try again.: No such file or directory :failed
        at oracle.ops.mgmt.cluster.ClusterCmd.runCmd(ClusterCmd.java:2149)
        at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmdOnUnix(OiipgClusterRunCmd.java:270)
        ... 13 more

INFO: Running command '/tmp/OraInstall2023-08-27_04-26-34PM/oui/bin/platform/linux64/runInstaller -jreLoc /etc/alternatives/jre_1.8.0 -paramFile /tmp/OraInstall2023-08-27_04-26-34PM/oui/clusterparam.ini  -silent -ignoreSysPrereqs -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosc -remoteInvocation -invokingNodeName testosa -logFilePath "/oracle/app/oraInventory/logs" -timestamp 2023-08-27_04-26-34PM' on the nodes 'testosc'.
INFO: Invoking OUI on cluster nodes testosc
INFO: /tmp/OraInstall2023-08-27_04-26-34PM/oui/bin/platform/linux64/runInstaller -jreLoc /etc/alternatives/jre_1.8.0 -paramFile /tmp/OraInstall2023-08-27_04-26-34PM/oui/clusterparam.ini  -silent -ignoreSysPrereqs -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosc -remoteInvocation -invokingNodeName testosa -logFilePath "/oracle/app/oraInventory/logs" -timestamp 2023-08-27_04-26-34PM
SEVERE: oracle.sysman.oii.oiip.oiipg.OiipgRemoteOpsException: Error occured while trying to run Unix command /tmp/OraInstall2023-08-27_04-26-34PM/oui/bin/platform/linux64/runInstaller -jreLoc /etc/alternatives/jre_1.8.0 -paramFile /tmp/OraInstall2023-08-27_04-26-34PM/oui/clusterparam.ini  -silent -ignoreSysPrereqs -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosc -remoteInvocation -invokingNodeName testosa -logFilePath "/oracle/app/oraInventory/logs" -timestamp 2023-08-27_04-26-34PM on nodes testosc. [Starting Oracle Universal Installer...Checking swap space: must be greater than 500 MB.   Actual 7628 MB    PassedThe Java RunTime Environment was not found at /etc/alternatives/jre_1.8.0/bin/java. Hence, the Oracle Universal Installer cannot be run.Please visit http://www.javasoft.com and install JRE version 1.3.1 or higher and try again.: No such file or directory :failed]
        at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmdOnUnix(OiipgClusterRunCmd.java:276)
        at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runAnyCmdOnNodes(OiipgClusterRunCmd.java:369)
        at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmd(OiipgClusterRunCmd.java:314)
        at oracle.sysman.oii.oiic.OiicBaseInventoryApp.runRemoteInvOpCmd(OiicBaseInventoryApp.java:281)
        at oracle.sysman.oii.oiic.OiicAttachHome.clsCmdAttachHome(OiicAttachHome.java:507)
        at oracle.sysman.oii.oiif.oiifw.OiifwClusterSaveInventoryWCCE.doOperation(OiifwClusterSaveInventoryWCCE.java:310)
        at oracle.sysman.oii.oiif.oiifb.OiifbCondIterator.iterate(OiifbCondIterator.java:171)
        at oracle.sysman.oii.oiif.oiifw.OiifwActionsPhaseWCDE.doOperation(OiifwActionsPhaseWCDE.java:641)
        at oracle.sysman.oii.oiif.oiifb.OiifbLinearIterator.iterate(OiifbLinearIterator.java:147)
        at oracle.sysman.oii.oiic.OiicInstallAPISession$OiicAPISelCompsInstall.doOperation(OiicInstallAPISession.java:1095)
        at oracle.sysman.oii.oiif.oiifb.OiifbCondIterator.iterate(OiifbCondIterator.java:171)
        at oracle.sysman.oii.oiic.OiicInstallAPISession.doInstallAction(OiicInstallAPISession.java:679)
        at oracle.sysman.oii.oiic.OiicInstallAPISession.access$000(OiicInstallAPISession.java:94)
        at oracle.sysman.oii.oiic.OiicInstallAPISession$OiicActionsThread.run(OiicInstallAPISession.java:971)
Caused by: oracle.ops.mgmt.cluster.ClusterException: Starting Oracle Universal Installer...Checking swap space: must be greater than 500 MB.   Actual 7628 MB    PassedThe Java RunTime Environment was not found at /etc/alternatives/jre_1.8.0/bin/java. Hence, the Oracle Universal Installer cannot be run.Please visit http://www.javasoft.com and install JRE version 1.3.1 or higher and try again.: No such file or directory :failed
        at oracle.ops.mgmt.cluster.ClusterCmd.runCmd(ClusterCmd.java:2149)
        at oracle.sysman.oii.oiip.oiipg.OiipgClusterRunCmd.runCmdOnUnix(OiipgClusterRunCmd.java:270)
        ... 13 more

SEVERE: Remote 'AttachHome' failed on nodes: 'testosb,testosc'. Refer to '/oracle/app/oraInventory/logs/installActions2023-08-27_04-26-34PM.log' for details.
It is recommended that the following command needs to be manually run on the failed nodes: 
 /oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=<node on which command is to be run>. 
Please refer 'AttachHome' logs under central inventory of remote nodes where failure occurred for more details.

Run the suggested command

/oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosb

/oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosc



[grid@testosb:/home/grid]$/oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosb
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 7628 MB    Passed
The Java RunTime Environment was not found at /etc/alternatives/jre_1.8.0/bin/java. Hence, the Oracle Universal Installer cannot be run.
Please visit http://www.javasoft.com and install JRE version 1.3.1 or higher and try again.
: No such file or directory
[grid@testosb:/home/grid]$

# Check java
[root@testosa ~]# java -version
openjdk version "1.8.0_382"
OpenJDK Runtime Environment (build 1.8.0_382-b05)
OpenJDK 64-Bit Server VM (build 25.382-b05, mixed mode)
[root@testosa ~]# ll /etc/alternatives/jre_1.8.0/bin/java 
-rwxr-xr-x 1 root root 8984 Aug 11 22:45 /etc/alternatives/jre_1.8.0/bin/java

[root@testosb ~]# which java
/usr/bin/which: no java in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin)
[root@testosb ~]# ll /etc/alternatives/jre_1.8.0/bin/java
ls: cannot access /etc/alternatives/jre_1.8.0/bin/java: No such file or directory
[root@testosb ~]# 

[root@testosc ~]# which java
/usr/bin/which: no java in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin)
[root@testosc ~]# ll /etc/alternatives/jre_1.8.0/bin/java
ls: cannot access /etc/alternatives/jre_1.8.0/bin/java: No such file or directory
[root@testosc ~]# 


# Install java on the other two nodes
cd /etc/alternatives/
tar -zxvf /soft/jre-8u202-linux-x64.tar.gz
mv /etc/alternatives/jre1.8.0_202 /etc/alternatives/jre_1.8.0
echo 'export PATH=$PATH:/etc/alternatives/jre_1.8.0/bin:$HOME/bin' >> /etc/profile
source /etc/profile


# Re-run the suggested command
[grid@testosb:/home/grid]$/oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosb
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 7628 MB    Passed
The inventory pointer is located at /etc/oraInst.loc
The inventory is located at /oracle/app/oraInventory
Please execute the '/oracle/app/oraInventory/orainstRoot.sh' script at the end of the session.
'AttachHome' was successful.
[grid@testosb:/home/grid]$




[grid@testosc:/home/grid]$/oracle/app/11.2.0/grid/oui/bin/runInstaller -attachHome -noClusterEnabled ORACLE_HOME=/oracle/app/11.2.0/grid ORACLE_HOME_NAME=Ora11g_gridinfrahome1 CLUSTER_NODES=testosa,testosb,testosc "INVENTORY_LOCATION=/oracle/app/oraInventory" LOCAL_NODE=testosc
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 7628 MB    Passed
The inventory pointer is located at /etc/oraInst.loc
The inventory is located at /oracle/app/oraInventory
Please execute the '/oracle/app/oraInventory/orainstRoot.sh' script at the end of the session.
'AttachHome' was successful.
[grid@testosc:/home/grid]$


Click OK after the fix completes

(screenshot of the root script prompt)

Run the scripts as prompted above.

Run them in order: finish on node a first, then run them on the other nodes.

[root@testosa ~]# /oracle/app/oraInventory/orainstRoot.sh
Changing permissions of /oracle/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.

Changing groupname of /oracle/app/oraInventory to oinstall.
The execution of the script is complete.
[root@testosa ~]# 

Running the root script fails with the following error

ohasd failed to start
Failed to start the Clusterware. Last 20 lines of the alert log follow: 
2023-08-27 18:37:06.861: 
[client(90262)]CRS-2101:The OLR was formatted using version 3.

Cause: CentOS 7 uses systemd rather than initd to run and restart processes, but root.sh starts the ohasd process the traditional initd way.

Fix: on CentOS 7, set up ohasd as a service before running root.sh.

# Create the service file as root
cat > /usr/lib/systemd/system/ohas.service << "EOF"
[Unit]
Description=Oracle High Availability Services
After=syslog.target
[Service]
ExecStart=/etc/init.d/init.ohasd run
Type=simple
Restart=always
[Install]
WantedBy=multi-user.target
EOF

chmod 777 /usr/lib/systemd/system/ohas.service

systemctl daemon-reload
systemctl enable ohas.service
systemctl start ohas.service

# Check the ohas service status
[root@testosa ~]# systemctl status ohas.service
* ohas.service - Oracle High Availability Services
   Loaded: loaded (/usr/lib/systemd/system/ohas.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2023-08-27 18:48:57 CST; 6s ago
 Main PID: 91992 (init.ohasd)
    Tasks: 1
   CGroup: /system.slice/ohas.service
           `-91992 /bin/sh /etc/init.d/init.ohasd run

Aug 27 18:48:57 testosa systemd[1]: Started Oracle High Availability Services.
[root@testosa ~]# 

Re-run the root script

[root@testosa ~]# /oracle/app/11.2.0/grid/root.sh
Performing root user operation for Oracle 11g 

The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME=  /oracle/app/11.2.0/grid

Enter the full pathname of the local bin directory: [/usr/local/bin]: 
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.

Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Using configuration parameter file: /oracle/app/11.2.0/grid/crs/install/crsconfig_params
User ignored Prerequisites during installation
Installing Trace File Analyzer
CRS-2672: Attempting to start 'ora.mdnsd' on 'testosa'
CRS-2676: Start of 'ora.mdnsd' on 'testosa' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'testosa'
CRS-2676: Start of 'ora.gpnpd' on 'testosa' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'testosa'
CRS-2672: Attempting to start 'ora.gipcd' on 'testosa'
CRS-2676: Start of 'ora.cssdmonitor' on 'testosa' succeeded
CRS-2676: Start of 'ora.gipcd' on 'testosa' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'testosa'
CRS-2672: Attempting to start 'ora.diskmon' on 'testosa'
CRS-2676: Start of 'ora.diskmon' on 'testosa' succeeded
CRS-2676: Start of 'ora.cssd' on 'testosa' succeeded

ASM created and started successfully.

Disk Group grid created successfully.

clscfg: -install mode specified
Successfully accumulated necessary OCR keys.
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
CRS-4256: Updating the profile
Successful addition of voting disk 408bcc1da0314fcbbf945af1c73754bc.
Successful addition of voting disk 14ebdd6a82984fffbf611eb50607a32f.
Successful addition of voting disk 46e5bed09bd34fccbffa6bb39e5f694c.
Successfully replaced voting disk group with +grid.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
##  STATE    File Universal Id                File Name Disk group
--  -----    -----------------                --------- ---------
 1. ONLINE   408bcc1da0314fcbbf945af1c73754bc (/dev/oracleasm/disks/GRID01) [GRID]
 2. ONLINE   14ebdd6a82984fffbf611eb50607a32f (/dev/oracleasm/disks/GRID02) [GRID]
 3. ONLINE   46e5bed09bd34fccbffa6bb39e5f694c (/dev/oracleasm/disks/GRID03) [GRID]
Located 3 voting disk(s).
CRS-2672: Attempting to start 'ora.asm' on 'testosa'
CRS-2676: Start of 'ora.asm' on 'testosa' succeeded
CRS-2672: Attempting to start 'ora.GRID.dg' on 'testosa'
CRS-2676: Start of 'ora.GRID.dg' on 'testosa' succeeded
Configure Oracle Grid Infrastructure for a Cluster ... succeeded
[root@testosa ~]# 
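After root.sh succeeds on a node, the local stack can be sanity-checked with the standard clusterware commands (full paths are shown because root's PATH is only extended later in this guide):

/oracle/app/11.2.0/grid/bin/crsctl check crs   # CRS, CSS and EVM daemons should all report online
/oracle/app/11.2.0/grid/bin/olsnodes -n        # lists cluster members once each node has run root.sh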

Run the scripts on nodes b and c in the same way. The first run of root.sh fails on every node; after it aborts with the error, start the ohas service and re-run the script.

After the scripts have completed, click OK.

image-20230827183211829

image-20230827193546031

This error appears because we are not using DNS for name resolution; it can be ignored.

image-20230827193741499

image-20230827193802855

image-20230827193834318

Creating ASM Disk Groups

As the grid user, run the asmca command.

image-20230827201300543

image-20230827201423504

image-20230827201639386

image-20230827202411488

image-20230827202459653

image-20230827202635639

image-20230827202713635

image-20230827202736659

Once the disk groups are created, click Exit in the lower-right corner.
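If no GUI is at hand, the same disk groups can also be created from SQL*Plus as the grid user (assuming grid's environment points at the local ASM instance). This is only a sketch: the DATA01/RECO01 disk labels and EXTERNAL redundancy are illustrative, not taken from the screenshots, and a disk group created this way must still be mounted on the remaining nodes with ALTER DISKGROUP ... MOUNT.

sqlplus / as sysasm <<'EOF'
CREATE DISKGROUP DATA EXTERNAL REDUNDANCY DISK '/dev/oracleasm/disks/DATA01';
CREATE DISKGROUP RECOVERY EXTERNAL REDUNDANCY DISK '/dev/oracleasm/disks/RECO01';
EOF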

Configuring root's Environment Variables

#Add the grid user's ORACLE_HOME path to root's environment variables on every node
[grid@testosa:/home/grid]$env|grep ORACLE_HOME
ORACLE_HOME=/oracle/app/11.2.0/grid

[root@testosa ~]# grep PATH ~/.bash_profile
PATH=$PATH:/oracle/app/11.2.0/grid/bin:$HOME/bin
export PATH
[root@testosa ~]# source ~/.bash_profile


[root@testosb ~]# grep PATH ~/.bash_profile
PATH=$PATH:/oracle/app/11.2.0/grid/bin:$HOME/bin
export PATH
[root@testosb ~]# 

[root@testosc ~]# grep PATH ~/.bash_profile
PATH=$PATH:/oracle/app/11.2.0/grid/bin:$HOME/bin
export PATH
[root@testosc ~]# 
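The grep output above shows the edited line on each node; an equivalent PATH can also be appended non-interactively (run once per node as root):

echo 'export PATH=$PATH:/oracle/app/11.2.0/grid/bin' >> ~/.bash_profile
source ~/.bash_profile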

Checking the OCR

[root@testosa ~]# ocrcheck
Status of Oracle Cluster Registry is as follows :
         Version                  :          3
         Total space (kbytes)     :     262120
         Used space (kbytes)      :       2748
         Available space (kbytes) :     259372
         ID                       : 1332412767
         Device/File Name         :      +grid
                                    Device/File integrity check succeeded

                                    Device/File not configured

                                    Device/File not configured

                                    Device/File not configured

                                    Device/File not configured

         Cluster registry integrity check succeeded

         Logical corruption check succeeded

[root@testosa ~]# 

#Create a mirror copy of the OCR
[root@testosa ~]# ocrconfig -add +recovery
[root@testosa ~]# 
[root@testosa ~]# ocrcheck
Status of Oracle Cluster Registry is as follows :
         Version                  :          3
         Total space (kbytes)     :     262120
         Used space (kbytes)      :       2748
         Available space (kbytes) :     259372
         ID                       : 1332412767
         Device/File Name         :      +grid
                                    Device/File integrity check succeeded
         Device/File Name         :  +recovery
                                    Device/File integrity check succeeded

                                    Device/File not configured

                                    Device/File not configured

                                    Device/File not configured

         Cluster registry integrity check succeeded

         Logical corruption check succeeded

[root@testosa ~]# 
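While checking the OCR, it is also worth confirming the voting disks and the automatic OCR backups; both are standard clusterware commands:

crsctl query css votedisk   # should list the three voting files on +GRID shown during root.sh
ocrconfig -showbackup       # lists the automatic OCR backups taken by the cluster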

Installing the Database Software

Upload the installation packages:

p13390677_112040_Linux-x86-64_1of7.zip
p13390677_112040_Linux-x86-64_2of7.zip

Unpack the packages as the oracle user:

su - oracle
unzip p13390677_112040_Linux-x86-64_1of7.zip
unzip p13390677_112040_Linux-x86-64_2of7.zip


#Start a VNC server
[oracle@testosa:/soft]$vncserver
You will require a password to access your desktops.

Password:
Verify:
Would you like to enter a view-only password (y/n)? m
A view-only password is not used
xauth:  file /home/oracle/.Xauthority does not exist

New 'testosa:6 (oracle)' desktop is testosa:6

Creating default startup script /home/oracle/.vnc/xstartup
Creating default config /home/oracle/.vnc/config
Starting applications specified in /home/oracle/.vnc/xstartup
Log file is /home/oracle/.vnc/testosa:6.log

[oracle@testosa:/soft]$

Connect with a VNC client and run the graphical installer remotely:

cd database
./runInstaller -jreLoc /etc/alternatives/jre_1.8.0

image-20230827210318069

image-20230827210402364

image-20230827210437160

image-20230827210547374

Verify SSH equivalence between the nodes:

image-20230827210634902

image-20230827210744273

image-20230827210819764

image-20230827210945929

image-20230827211017904

image-20230827211052433

image-20230827211345746

Single Client Access Name (SCAN) - This test verifies the Single Client Access Name configuration.  Error: 
 - 
PRVG-1101 : SCAN name "scanip" failed to resolve  - Cause:  An attempt to resolve specified SCAN name to a list of IP addresses failed because SCAN could not be resolved in DNS or GNS using 'nslookup'.  - Action:  Check whether the specified SCAN name is correct. If SCAN name should be resolved in DNS, check the configuration of SCAN name in DNS. If it should be resolved in GNS make sure that GNS resource is online. 
 - 
PRVF-4657 : Name resolution setup check for "scanip" (IP address: 192.168.1.80) failed  - Cause:  Inconsistent IP address definitions found for the SCAN name identified using DNS and configured name resolution mechanism(s).  - Action:  Look up the SCAN name with nslookup, and make sure the returned IP addresses are consistent with those defined in NIS and /etc/hosts as configured in /etc/nsswitch.conf by reconfiguring the latter. Check the Name Service Cache Daemon (/usr/sbin/nscd) by clearing its cache and restarting it. 

  Check Failed on Nodes: [testosc,  testosb,  testosa]  
Verification result of failed node: testosc 

 Details: 
 - 
PRVF-4664 : Found inconsistent name resolution entries for SCAN name "scanip"  - Cause:  The nslookup utility and the configured name resolution mechanism(s), as defined in /etc/nsswitch.conf, returned inconsistent IP address information for the SCAN name identified.  - Action:  Check the Name Service Cache Daemon (/usr/sbin/nscd), the Domain Name Server (nslookup) and the /etc/hosts file to make sure the IP address for the SCAN names are registered correctly. 
Back to Top  
Verification result of failed node: testosb 

 Details: 
 - 
PRVF-4664 : Found inconsistent name resolution entries for SCAN name "scanip"  - Cause:  The nslookup utility and the configured name resolution mechanism(s), as defined in /etc/nsswitch.conf, returned inconsistent IP address information for the SCAN name identified.  - Action:  Check the Name Service Cache Daemon (/usr/sbin/nscd), the Domain Name Server (nslookup) and the /etc/hosts file to make sure the IP address for the SCAN names are registered correctly. 
Back to Top  
Verification result of failed node: testosa 

 Details: 
 - 
PRVF-4664 : Found inconsistent name resolution entries for SCAN name "scanip"  - Cause:  The nslookup utility and the configured name resolution mechanism(s), as defined in /etc/nsswitch.conf, returned inconsistent IP address information for the SCAN name identified.  - Action:  Check the Name Service Cache Daemon (/usr/sbin/nscd), the Domain Name Server (nslookup) and the /etc/hosts file to make sure the IP address for the SCAN names are registered correctly. 

This error can be ignored.
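The disagreement is easy to reproduce by hand: getent follows /etc/nsswitch.conf (and therefore /etc/hosts), while nslookup queries DNS only, so the two return inconsistent answers for a hosts-file-based SCAN.

getent hosts scanip   # resolves via /etc/hosts and returns 192.168.1.80
nslookup scanip       # DNS-only lookup; fails here because no DNS record exists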

image-20230827211751858

image-20230827212021246

Error in invoking target 'agent nmhs' of makefile '/oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk'. See '/oracle/app/oraInventory/logs/installActions2023-08-27_09-02-21PM.log' for details.

This is a bug on Linux 7; it can be worked around by adding a linker flag.

Locate the file named in the error: /oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk

Back the file up first, in case it is needed again later:

cp /oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk /oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk.bak

#Append -lnnz11 to line 176, as shown below
vim /oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk
171 #===========================
172 #  emdctl
173 #===========================
174 
175 $(SYSMANBIN)emdctl:
176         $(MK_EMAGENT_NMECTL) -lnnz11
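Instead of editing by hand, the same change can be applied with a one-line sed. This is a sketch of the usual workaround; in the stock 11.2.0.4 makefile $(MK_EMAGENT_NMECTL) appears only in the emdctl target, so a global substitution has the same effect:

sed -i 's/\$(MK_EMAGENT_NMECTL)/\$(MK_EMAGENT_NMECTL) -lnnz11/g' \
    /oracle/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk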

After making the change, click Retry.

image-20230827213915841

[root@testosa ~]# /oracle/app/oracle/product/11.2.0/db_1/root.sh
Performing root user operation for Oracle 11g 

The following environment variables are set as:
    ORACLE_OWNER= oracle
    ORACLE_HOME=  /oracle/app/oracle/product/11.2.0/db_1

Enter the full pathname of the local bin directory: [/usr/local/bin]: 
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.

Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
[root@testosa ~]# 



[root@testosb ~]# /oracle/app/oracle/product/11.2.0/db_1/root.sh
Performing root user operation for Oracle 11g 

The following environment variables are set as:
    ORACLE_OWNER= oracle
    ORACLE_HOME=  /oracle/app/oracle/product/11.2.0/db_1

Enter the full pathname of the local bin directory: [/usr/local/bin]: 
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.

Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
[root@testosb ~]# 



[root@testosc ~]# /oracle/app/oracle/product/11.2.0/db_1/root.sh
Performing root user operation for Oracle 11g 

The following environment variables are set as:
    ORACLE_OWNER= oracle
    ORACLE_HOME=  /oracle/app/oracle/product/11.2.0/db_1

Enter the full pathname of the local bin directory: [/usr/local/bin]: 
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.

Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
[root@testosc ~]# 

image-20230827214125587

Creating the Database

Use the dbca command to bring up the graphical interface.

image-20230827214326753

image-20230827214356932

image-20230827214456535

image-20230827214702402

image-20230827214830412

image-20230827214901554

image-20230827215016429

image-20230827215341873

image-20230827215453838

image-20230827215315347

image-20230827215618485

As above, enter the ASM administrator password created during the grid installation: oracle

image-20230827215842725

image-20230827215945650

image-20230827220023068

image-20230827220145140

The Database Vault password set here is: VaultPas1#

image-20230827220702928

image-20230827220739748

image-20230827220915278

image-20230827220930730

image-20230827221040509

image-20230827221118387

image-20230827221208982

image-20230827221312626

image-20230827221358718

image-20230827221430043

As shown above: there are three nodes, one redo thread per node, and two redo log files per thread.

In production, each redo log file should be at least 200 MB, with at least five per node (thread); a sketch of adding larger groups follows.
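Extra groups can be added per thread from SQL*Plus as the oracle user. This is a minimal sketch only: the +DATA disk group and group numbers 10-12 are illustrative, not part of the dbca run above.

sqlplus / as sysdba <<'EOF'
ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 10 ('+DATA') SIZE 200M;
ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 11 ('+DATA') SIZE 200M;
ALTER DATABASE ADD LOGFILE THREAD 3 GROUP 12 ('+DATA') SIZE 200M;
EOF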

image-20230827221623244

image-20230827221812316

image-20230827225338106

Because we chose policy-managed (server pool) instance management when creating the database, the auto-generated instance names are the DB name plus an underscore and a number.

[grid@testosa:/home/grid]$lsnrctl status

LSNRCTL for Linux: Version 11.2.0.4.0 - Production on 27-AUG-2023 22:55:55

Copyright (c) 1991, 2013, Oracle.  All rights reserved.

Connecting to (DESCRIPTION=(ADDRESS=(PROTOCOL=IPC)(KEY=LISTENER)))
STATUS of the LISTENER
------------------------
Alias                     LISTENER
Version                   TNSLSNR for Linux: Version 11.2.0.4.0 - Production
Start Date                27-AUG-2023 19:31:29
Uptime                    0 days 3 hr. 24 min. 25 sec
Trace Level               off
Security                  ON: Local OS Authentication
SNMP                      OFF
Listener Parameter File   /oracle/app/11.2.0/grid/network/admin/listener.ora
Listener Log File         /oracle/app/grid/diag/tnslsnr/testosa/listener/alert/log.xml
Listening Endpoints Summary...
  (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=LISTENER)))
  (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.1.81)(PORT=1521)))
  (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.1.84)(PORT=1521)))
Services Summary...
Service "+ASM" has 1 instance(s).
  Instance "+ASM1", status READY, has 1 handler(s) for this service...
Service "rac_db" has 1 instance(s).
  Instance "racdb_1", status READY, has 1 handler(s) for this service...
Service "racdbXDB" has 1 instance(s).
  Instance "racdb_1", status READY, has 1 handler(s) for this service...
The command completed successfully
[grid@testosa:/home/grid]$




[oracle@testosa:/home/oracle]$lsnrctl status

LSNRCTL for Linux: Version 11.2.0.4.0 - Production on 27-AUG-2023 22:56:41

Copyright (c) 1991, 2013, Oracle.  All rights reserved.

Connecting to (ADDRESS=(PROTOCOL=tcp)(HOST=)(PORT=1521))
STATUS of the LISTENER
------------------------
Alias                     LISTENER
Version                   TNSLSNR for Linux: Version 11.2.0.4.0 - Production
Start Date                27-AUG-2023 19:31:29
Uptime                    0 days 3 hr. 25 min. 11 sec
Trace Level               off
Security                  ON: Local OS Authentication
SNMP                      OFF
Listener Parameter File   /oracle/app/11.2.0/grid/network/admin/listener.ora
Listener Log File         /oracle/app/grid/diag/tnslsnr/testosa/listener/alert/log.xml
Listening Endpoints Summary...
  (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=LISTENER)))
  (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.1.81)(PORT=1521)))
  (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.1.84)(PORT=1521)))
Services Summary...
Service "+ASM" has 1 instance(s).
  Instance "+ASM1", status READY, has 1 handler(s) for this service...
Service "rac_db" has 1 instance(s).
  Instance "racdb_1", status READY, has 1 handler(s) for this service...
Service "racdbXDB" has 1 instance(s).
  Instance "racdb_1", status READY, has 1 handler(s) for this service...
The command completed successfully
[oracle@testosa:/home/oracle]$
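The policy-managed setup behind these instance names can be confirmed with srvctl (standard commands, run as the oracle user):

srvctl config database -d rac_db   # shows the server pool and the management policy
srvctl status database -d rac_db   # lists the running instances (racdb_1, racdb_2, racdb_3)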

Checking the Cluster Resources

[root@testosa ~]# crsctl status res -t
--------------------------------------------------------------------------------
NAME           TARGET  STATE        SERVER                   STATE_DETAILS       
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.DATA.dg
               ONLINE  ONLINE       testosa                                      
               ONLINE  ONLINE       testosb                                      
               ONLINE  ONLINE       testosc                                      
ora.GRID.dg
               ONLINE  ONLINE       testosa                                      
               ONLINE  ONLINE       testosb                                      
               ONLINE  ONLINE       testosc                                      
ora.LISTENER.lsnr
               ONLINE  ONLINE       testosa                                      
               ONLINE  ONLINE       testosb                                      
               ONLINE  ONLINE       testosc                                      
ora.RECOVERY.dg
               ONLINE  ONLINE       testosa                                      
               ONLINE  ONLINE       testosb                                      
               ONLINE  ONLINE       testosc                                      
ora.asm
               ONLINE  ONLINE       testosa                  Started             
               ONLINE  ONLINE       testosb                  Started             
               ONLINE  ONLINE       testosc                  Started             
ora.gsd
               OFFLINE OFFLINE      testosa                                      
               OFFLINE OFFLINE      testosb                                      
               OFFLINE OFFLINE      testosc                                      
ora.net1.network
               ONLINE  ONLINE       testosa                                      
               ONLINE  ONLINE       testosb                                      
               ONLINE  ONLINE       testosc                                      
ora.ons
               ONLINE  ONLINE       testosa                                      
               ONLINE  ONLINE       testosb                                      
               ONLINE  ONLINE       testosc                                      
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       testosa                                      
ora.cvu
      1        ONLINE  ONLINE       testosa                                      
ora.oc4j
      1        ONLINE  ONLINE       testosa                                      
ora.rac_db.db
      1        ONLINE  ONLINE       testosa                  Open                
      2        ONLINE  ONLINE       testosb                  Open                
      3        ONLINE  ONLINE       testosc                  Open                
ora.scan1.vip
      1        ONLINE  ONLINE       testosa                                      
ora.testosa.vip
      1        ONLINE  ONLINE       testosa                                      
ora.testosb.vip
      1        ONLINE  ONLINE       testosb                                      
ora.testosc.vip
      1        ONLINE  ONLINE       testosc                                      
[root@testosa ~]# 
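A few more routine health checks round this out (all standard crsctl/srvctl invocations):

crsctl check cluster -all    # CRS/CSS/EVM daemon status on every node
srvctl status scan_listener  # placement of the SCAN listener
srvctl status asm            # ASM instances across the cluster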

References

https://blog.csdn.net/mengxiangfeiyang/article/details/129034747
http://www.fgedu.net.cn/7427.html
https://blog.51cto.com/u_12991611/5720816
https://blog.csdn.net/krusher2016/article/details/104462844/
