Environment: RDBMS 11.2.0.4
Change the RAC IP addresses, including the public, VIP, and SCAN IPs; the private IP is not changed.
Steps
1 Shut down the database, listeners, CRS, etc.
2 Modify /etc/hosts
3 Change the IP configuration at the OS level
4 Start CRS
5 Modify the public, VIP, and SCAN IPs
6 Modify the private IP -- skipped for now
-- The original addresses are on the 192.168.2.x subnet and will be changed to the 192.168.1.x subnet. The private IPs are left unchanged for now.
[root@host02 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
# public
#192.168.2.101 host01
192.168.2.102 host02
192.168.2.107 host03
#vip
#192.168.2.103 host01-vip
192.168.2.104 host02-vip
192.168.2.108 host03-vip
#priv
#192.168.0.101 host01-priv
192.168.0.102 host02-priv
192.168.0.107 host03-priv
#scan
192.168.2.111 cluster-scan
1 Shut down the database, listeners, CRS, etc.
-- First disable CRS autostart at OS boot
[root@host02 grid]# crsctl disable crs;
CRS-4621: Oracle High Availability Services autostart is disabled.
[root@host02 grid]#
[root@host03 grid]# crsctl disable crs
CRS-4621: Oracle High Availability Services autostart is disabled.
[root@host03 grid]#
-- Shut down the database
[grid@host02 ~]$ srvctl status database -d racdb
Instance racdb2 is running on node host02
Instance racdb3 is running on node host03
[grid@host02 ~]$ srvctl stop database -d racdb
[grid@host02 ~]$ srvctl status database -d racdb
Instance racdb2 is not running on node host02
Instance racdb3 is not running on node host03
[grid@host02 ~]$
-- Check the SCAN configuration; the original SCAN is on the 192.168.2 subnet
[grid@host02 ~]$ srvctl config scan
SCAN name: cluster-scan, Network: 1/192.168.2.0/255.255.255.0/eth1
SCAN VIP name: scan1, IP: /cluster-scan/192.168.2.111
[grid@host02 ~]$ srvctl config listener
Name: LISTENER
Network: 1, Owner: grid
Home: <CRS home>
End points: TCP:1521
-- Stop the listeners, then stop CRS
[grid@host02 ~]$ srvctl stop listener
[root@host02 grid]# crsctl stop crs
[root@host03 grid]# crsctl stop crs
2 Modify /etc/hosts
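-- A sketch of the updated /etc/hosts is below. The VIP and SCAN entries match the addresses that appear later in this walkthrough; the public entries (192.168.1.102/192.168.1.107) are an assumption that the old host numbering is kept, and the private entries stay on 192.168.0.x.
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
# public (addresses assumed from the old numbering)
192.168.1.102 host02
192.168.1.107 host03
#vip
192.168.1.104 host02-vip
192.168.1.108 host03-vip
#priv (unchanged)
192.168.0.102 host02-priv
192.168.0.107 host03-priv
#scan
192.168.1.111 cluster-scan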
3 Update the NIC configuration at the OS level
vi /etc/sysconfig/network-scripts/ifcfg-eth1
service network restart
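-- A minimal ifcfg-eth1 sketch for host02, assuming a static configuration; IPADDR is the line that has to change (plus GATEWAY, if one is set), and 192.168.1.102 is assumed from the old numbering.
DEVICE=eth1
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.1.102    # was 192.168.2.102; new value assumed from the old numbering
NETMASK=255.255.255.0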
Then reconfigure the NIC connection mode of the virtual machines (create a new virtual NIC on the host machine and bridge the VMs to it).
4 Start CRS
[root@host03 grid]# crsctl start crs
CRS-4123: Oracle High Availability Services has been started.
[root@host03 grid]#
[root@host02 grid]# crsctl start crs
CRS-4123: Oracle High Availability Services has been started.
[root@host02 grid]#
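-- Before touching the network resources, it is worth confirming that the stack actually came up on both nodes, for example:
crsctl check crs     # the CRS, CSS, and EVM daemons should all report online
crsctl stat res -t   # optional: tabular view of all cluster resource states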
5 Modify the public, VIP, and SCAN IPs
-- The public interface is still registered on the 192.168.2.x subnet
[root@host03 grid]# oifcfg getif
eth1 192.168.2.0 global public
eth2 192.168.0.0 global cluster_interconnect
[root@host03 grid]#
[root@host02 grid]# oifcfg getif
eth1 192.168.2.0 global public
eth2 192.168.0.0 global cluster_interconnect
[root@host02 grid]#
-- Modify the public interface
[root@host03 grid]# oifcfg delif -global eth1
[root@host03 grid]# oifcfg setif -global eth1/192.168.1.0:public
[root@host03 grid]#
[root@host02 grid]# oifcfg delif -global eth1
[root@host02 grid]# oifcfg setif -global eth1/192.168.1.0:public
[root@host02 grid]#
-- Check again; the public interface has been updated
[root@host03 grid]# oifcfg getif
eth2 192.168.0.0 global cluster_interconnect
eth1 192.168.1.0 global public
[root@host03 grid]#
[root@host02 grid]# oifcfg getif
eth2 192.168.0.0 global cluster_interconnect
eth1 192.168.1.0 global public
[root@host02 grid]#
-- Modify the VIPs
-- The database and listeners must be stopped first
[root@host03 grid]# srvctl status vip -n host02
VIP host02-vip is enabled
VIP host02-vip is not running
[root@host03 grid]# srvctl status vip -n host03
VIP host03-vip is enabled
VIP host03-vip is not running
[root@host03 grid]#
-- Check the current VIP configuration. The VIP addresses themselves already show the new values (they are resolved by name from the updated /etc/hosts), but the registered subnet has not changed.
[root@host03 grid]# olsnodes -s
host02  Active
host03  Active
[root@host03 grid]# srvctl config vip -n host03
VIP exists: /host03-vip/192.168.1.108/192.168.2.0/255.255.255.0/eth1, hosting node host03
[root@host03 grid]# srvctl config vip -n host02
VIP exists: /host02-vip/192.168.1.104/192.168.2.0/255.255.255.0/eth1, hosting node host02
[root@host03 grid]#
-- Fix this as follows. The VIP configuration above is inconsistent: on each node the registered subnet no longer matches the new VIP address, so it must be modified.
[root@host02 grid]# srvctl modify nodeapps -n host02 -A 192.168.1.104/255.255.255.0/eth1
[root@host02 grid]# srvctl modify nodeapps -n host03 -A 192.168.1.108/255.255.255.0/eth1
[root@host02 grid]#
-- Check the VIPs again; the subnet has been updated to 192.168.1.0
[root@host02 grid]# srvctl config vip -n host02
VIP exists: /host02-vip/192.168.1.104/192.168.1.0/255.255.255.0/eth1, hosting node host02
[root@host02 grid]# srvctl config vip -n host03
VIP exists: /host03-vip/192.168.1.108/192.168.1.0/255.255.255.0/eth1, hosting node host03
[root@host02 grid]#
-- Start the VIPs; apparently the modification above already brought them up on its own
[root@host02 grid]# srvctl start vip -n host02
PRKO-2420 : VIP is already started on node(s): host02
[root@host02 grid]# srvctl start vip -n host03
PRKO-2420 : VIP is already started on node(s): host03
[root@host02 grid]#
-- Update the listener addresses
-- Start the listener; since the listener on host03 already came up earlier, only the one on host02 is started
[root@host02 grid]# srvctl start listener -n host02
[root@host02 grid]# lsnrctl status
-- Check the listener-related instance parameters local_listener and remote_listener
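-- A sketch of that check from a SQL*Plus session as SYSDBA; after the subnet change these parameters must point at the new VIP and SCAN addresses (the values below are illustrative, not from the original transcript):
SQL> show parameter local_listener
SQL> show parameter remote_listener
-- if either still references a 192.168.2.x address, reset it, for example:
SQL> alter system set local_listener='(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.1.104)(PORT=1521))' sid='racdb2';
SQL> alter system set remote_listener='cluster-scan:1521' scope=both;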
-- Modify the SCAN
-- Check the current SCAN configuration; the SCAN IP is still on the 192.168.2 subnet
[root@host02 grid]# srvctl config scan
SCAN name: cluster-scan, Network: 1/192.168.1.0/255.255.255.0/eth1
SCAN VIP name: scan1, IP: /cluster-scan/192.168.2.111
[root@host02 grid]#
-- Modify the SCAN IP and check again; the SCAN IP has been updated (srvctl modify scan re-resolves the cluster-scan name, which now maps to the new address in /etc/hosts)
[root@host02 grid]# srvctl modify scan -n cluster-scan
[root@host02 grid]# srvctl config scan
SCAN name: cluster-scan, Network: 1/192.168.1.0/255.255.255.0/eth1
SCAN VIP name: scan1, IP: /cluster-scan/192.168.1.111
[root@host02 grid]#
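-- The transcript skips it, but before the connection test below the SCAN resources and the database need to be running again; presumably something along these lines:
srvctl start scan                # SCAN VIP, if not already up
srvctl start scan_listener       # SCAN listener
srvctl start database -d racdb   # bring the racdb instances back up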
-- Connection verification
@>conn sys/oracle@192.168.1.111/racdb as sysdba
Connected.
SYS@192.168.1.111/racdb>select open_mode from v$database;

OPEN_MODE
----------------------------------------
READ WRITE

SYS@192.168.1.111/racdb>
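-- One loose end: CRS autostart was disabled at the beginning of this procedure. Once everything checks out, re-enable it on both nodes:
[root@host02 grid]# crsctl enable crs
[root@host03 grid]# crsctl enable crs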
END