Environment: Oracle 11.2.0.4 RAC
IP addresses before the change
# public ip
192.168.2.71 db1
192.168.2.72 db2
192.168.2.76 db3
#priv ip
200.100.100.11 db1-priv
200.100.100.12 db2-priv
200.100.100.13 db3-priv
#vip ip
192.168.2.73 db1-vip
192.168.2.74 db2-vip
192.168.2.77 db3-vip
#scan ip
192.168.2.75 db-scan
IP addresses after the change
# public ip
192.168.1.71 db1
192.168.1.72 db2
192.168.1.76 db3
#priv ip
100.100.100.11 db1-priv
100.100.100.12 db2-priv
100.100.100.13 db3-priv
#vip ip
192.168.1.73 db1-vip
192.168.1.74 db2-vip
192.168.1.77 db3-vip
#scan ip
192.168.1.75 db-scan
1. Check the existing OCR backups
[root@db1 bin]# ./ocrconfig -showbackup
2. Back up the OCR manually
[root@db1 bin]# ./ocrconfig -manualbackup
db3 2015/01/29 19:58:22 /u01/app/11.2.0/grid/cdata/db-scan/backup_20150129_195822.ocr
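To confirm that the manual backup is recorded, the list of manual backups can be displayed again (an optional check):
[root@db1 bin]# ./ocrconfig -showbackup manual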
3. Shut down the database
srvctl stop database -d db
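Before going further, it is worth confirming that all instances are down. A possible check, assuming the database resource is named db as above:
srvctl status database -d db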
4. Modify the cluster network configuration (this can be run from any one node):
I performed these steps on node 1.
[root@db1 ~]# cd /u01/app/11.2.0/grid/bin/
[root@db1 bin]# ./oifcfg getif
eth0 192.168.2.0 global public
eth2 200.100.100.0 global cluster_interconnect
[root@db1 bin]# ./oifcfg delif -global eth0
[root@db1 bin]# ./oifcfg setif -global eth0/192.168.1.0:public
[root@db1 bin]# ./oifcfg getif
eth2 200.100.100.0 global cluster_interconnect
eth0 192.168.1.0 global public
[root@db1 bin]# ./oifcfg delif -global eth2
PRIF-31: Failed to delete the specified network interface because it is the last private interface
PRIF-31 means the last private interface cannot be deleted. The workaround is to register the new interconnect subnet on the interface first, then delete the old one:
[root@db1 bin]# ./oifcfg setif -global eth2/100.100.100.0:cluster_interconnect
[root@db1 bin]# ./oifcfg getif
eth0 192.168.1.0 global public
eth2 200.100.100.0 global cluster_interconnect
eth2 100.100.100.0 global cluster_interconnect
[root@db1 bin]# ./oifcfg delif -global eth2/200.100.100.0:cluster_interconnect
[root@db1 bin]# ./oifcfg getif
eth0 192.168.1.0 global public
eth2 100.100.100.0 global cluster_interconnect
Stop the Oracle high-availability services on all nodes (run from any one node):
[root@db1 bin]# ./crsctl stop cluster -all
5. Change the OS IP addresses, update the /etc/hosts file on every node, and test connectivity (see the sketch below)
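Clusterware does not change the operating-system addresses, so this has to be done on every node. A minimal sketch, assuming RHEL/OEL-style network scripts and that eth0 carries the public network and eth2 the interconnect (the file paths and the service restart are assumptions; adapt to your platform):
# update /etc/hosts on every node with the new addresses listed above
vi /etc/hosts
# update IPADDR (and GATEWAY if needed) for the public and private interfaces
vi /etc/sysconfig/network-scripts/ifcfg-eth0
vi /etc/sysconfig/network-scripts/ifcfg-eth2
service network restart
# basic connectivity tests between nodes
ping -c 3 db2
ping -c 3 db2-priv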
6. Restart the cluster services on all nodes
[root@db1 bin]# ./crsctl start cluster -all
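Once the stack is back up, its state on every node can be verified before reconfiguring the SCAN (an optional check):
[root@db1 bin]# ./crsctl check cluster -all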
7. Stop the scan_listener and scan resources
[root@db1 bin]# ./srvctl stop scan_listener
[root@db1 bin]# ./srvctl stop scan
8. Remove the scan_listener and scan resources
[root@db1 bin]# ./srvctl remove scan_listener -f
[root@db1 bin]# ./srvctl remove scan -f
9. Add the scan and scan_listener resources back on the new subnet
[root@db1 bin]# ./srvctl add scan -n db-scan -k 2 -S 192.168.1.0/255.255.255.0/eth0
Note: using -k 1 (network number 1) in the command above raises an error, because that network number is already in use from the previous configuration, hence -k 2.
[root@db1 bin]# ./srvctl config scan
SCAN name: db-scan, Network: 2/192.168.1.0/255.255.255.0/eth0
SCAN VIP name: scan1, IP: /db-scan/192.168.1.75
[root@db1 bin]# ./srvctl add scan_listener
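The new SCAN listener definition can be reviewed before starting it (an optional check):
[root@db1 bin]# ./srvctl config scan_listener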
10. Start the scan and scan_listener resources
[root@db1 bin]# ./srvctl start scan
[root@db1 bin]# ./srvctl start scan_listener
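To confirm that both resources are online and see which nodes host them (an optional check):
[root@db1 bin]# ./srvctl status scan
[root@db1 bin]# ./srvctl status scan_listener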
11. Stop the VIP resources
[root@db1 bin]# ./crsctl stop resource ora.db1.vip -f
[root@db1 bin]# ./crsctl stop resource ora.db2.vip -f
[root@db1 bin]# ./crsctl stop resource ora.db3.vip -f
The VIP status can be checked with the following command:
[root@db1 bin]# ./crsctl stat res -t
ora.db1.vip
1 OFFLINE OFFLINE
ora.db2.vip
1 OFFLINE OFFLINE
ora.db3.vip
1 OFFLINE OFFLINE
12. Modify the VIP addresses
[root@db1 bin]# ./srvctl modify nodeapps -A 192.168.1.73/255.255.255.0/eth0 -n db1
[root@db1 bin]# ./srvctl modify nodeapps -A 192.168.1.74/255.255.255.0/eth0 -n db2
[root@db1 bin]# ./srvctl modify nodeapps -A 192.168.1.77/255.255.255.0/eth0 -n db3
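The new VIP settings can be confirmed per node before the resources are restarted (an optional check, shown for db1; repeat for db2 and db3):
[root@db1 bin]# ./srvctl config vip -n db1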
13. Start the VIP resources:
[root@db1 bin]# ./crsctl start resource ora.db1.vip -f
CRS-2672: Attempting to start 'ora.db1.vip' on 'db1'
CRS-2676: Start of 'ora.db1.vip' on 'db1' succeeded
[root@db1 bin]# ./crsctl start resource ora.db2.vip -f
CRS-2672: Attempting to start 'ora.db2.vip' on 'db2'
CRS-2676: Start of 'ora.db2.vip' on 'db2' succeeded
[root@db1 bin]# ./crsctl start resource ora.db3.vip -f
CRS-2672: Attempting to start 'ora.db3.vip' on 'db3'
CRS-2676: Start of 'ora.db3.vip' on 'db3' succeeded
Starting the VIP resources also brings the local listeners online, so step 14 below is optional.
14. Start the local listeners:
[root@db1 bin]# ./srvctl start listener -n db1
[root@db1 bin]# ./srvctl start listener -n db2
[root@db1 bin]# ./srvctl start listener -n db3
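With the network changes in place, the database that was stopped in step 3 can be started again and checked (assuming the same database name db):
srvctl start database -d db
srvctl status database -d db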
Issue: after all the steps were completed, running oifcfg getif returned the following message:
[grid@db1 peer]$ oifcfg getif
eth0 192.168.1.0 global public
eth2 100.100.100.0 global cluster_interconnect
Only in OCR: eth2: 100.100.100.0 global cluster_interconnect
PRIF-30: Network information in OCR and GPnP profile differs
This means the network information in the OCR and the GPnP profile differ; re-registering the interconnect subnet resolves it:
[grid@db1 peer]$ oifcfg setif -global eth2/100.100.100.0:cluster_interconnect
[grid@db1 peer]$ oifcfg getif
eth0 192.168.1.0 global public
eth2 100.100.100.0 global cluster_interconnect
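To also confirm that the GPnP profile itself now matches, the profile can be dumped as the grid user (an optional check; the output is the profile XML):
[grid@db1 peer]$ gpnptool get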