Oracle ASM multipath and udev device binding configuration

1. Install the device-mapper-multipath package

# rpm -qa | grep device-mapper-multipath
# yum install -y device-mapper-multipath

2. Enable the multipathd service at boot

# systemctl enable multipathd.service
# systemctl list-unit-files |grep multipath
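
To double-check the boot setting, systemctl is-enabled can also be queried; it should report "enabled" after the command above:

# systemctl is-enabled multipathd.service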

3. Verify that the multipath kernel modules are loaded

# lsmod|grep dm_multipath

-- Load the kernel modules
# modprobe dm-multipath
# modprobe dm-round-robin

# lsmod|grep dm_multipath
dm_multipath           27792  14 dm_round_robin,dm_service_time
dm_mod                124501  45 dm_multipath,dm_log,dm_mirror
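
multipathd normally loads dm_multipath on its own, so the manual modprobe above is mainly a sanity check. If the modules should nevertheless be loaded at every boot, a systemd modules-load.d drop-in can be added (the file name dm-multipath.conf below is only an example):

# cat > /etc/modules-load.d/dm-multipath.conf <<EOF
dm_multipath
dm_round_robin
EOF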

4. Generate the multipath configuration and start the service

# mpathconf --enable
# systemctl start multipathd.service
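
On RHEL/CentOS 7, mpathconf --enable creates /etc/multipath.conf with a defaults section roughly like the one below (the exact template varies by package version); mpathconf --enable --with_multipathd y can also be used to start the service in the same step:

defaults {
	user_friendly_names yes
	find_multipaths yes
}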

5. Check the WWIDs

# cat /etc/multipath/bindings

mpatha 1VMware_VITDEVIDeacb8861a5ab421836115cb9018f9f10
mpathb 1VMware_VITDEVIDfbcb8861ccc5847e064e5cb9018f9c04
mpathc 1VMware_VITDEVIDd343b861ad1d078f28ab5cb9018f9fd8
mpathd 1VMware_VITDEVID5a44b861d4956f0fcad65cb9018f9f34
mpathe 1VMware_VITDEVIDa644b861306d2fc75d4c5cb9018f9f34
mpathf 1VMware_VITDEVIDb844b861afb20fda3e455cb9018f9fd8
mpathg 1VMware_VITDEVID4445b86124db439f17cd5cb9018f9c04
mpathh 1VMware_VITDEVID6e45b861d1723d02371b5cb9018f9fd8
mpathi 1VMware_VITDEVID7593ba61198cc8b778b45cb9018f9fd8
mpathj 1VMware_VITDEVID8193ba61836cdd0166445cb9018f9fd8
mpathk 1VMware_VITDEVID8f93ba613b94159ebfc05cb9018f9fd8
mpathl 1VMware_VITDEVID9d93ba613e0cd453b9915cb9018f9f34
mpathm 1VMware_VITDEVIDf993ba61bb9a711d93df5cb9018f9f10
mpathn 1VMware_VITDEVID0994ba61f501edaac8c55cb9018f9c04
mpatho 36001405d4c80f6610234761a654d3302
# multipath -ll

mpathe (1VMware_VITDEVIDa644b861306d2fc75d4c5cb9018f9f34) dm-14 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:2 sdk 8:160 active ready running
mpathd (1VMware_VITDEVID5a44b861d4956f0fcad65cb9018f9f34) dm-13 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:1 sdj 8:144 active ready running
mpathc (1VMware_VITDEVIDd343b861ad1d078f28ab5cb9018f9fd8) dm-12 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:0 sdi 8:128 active ready running
mpathn (1VMware_VITDEVID0994ba61f501edaac8c55cb9018f9c04) dm-11 VMware  ,Virtual SAN     
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:5 sdh 8:112 active ready running
mpathm (1VMware_VITDEVIDf993ba61bb9a711d93df5cb9018f9f10) dm-10 VMware  ,Virtual SAN     
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:4 sdg 8:96  active ready running
mpathl (1VMware_VITDEVID9d93ba613e0cd453b9915cb9018f9f34) dm-9 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:3 sdf 8:80  active ready running
mpathk (1VMware_VITDEVID8f93ba613b94159ebfc05cb9018f9fd8) dm-8 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:2 sde 8:64  active ready running
mpathj (1VMware_VITDEVID8193ba61836cdd0166445cb9018f9fd8) dm-7 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:1 sdd 8:48  active ready running
mpathi (1VMware_VITDEVID7593ba61198cc8b778b45cb9018f9fd8) dm-6 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:0 sdc 8:32  active ready running
mpathh (1VMware_VITDEVID6e45b861d1723d02371b5cb9018f9fd8) dm-17 VMware  ,Virtual SAN     
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:5 sdn 8:208 active ready running
mpathg (1VMware_VITDEVID4445b86124db439f17cd5cb9018f9c04) dm-16 VMware  ,Virtual SAN     
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:4 sdm 8:192 active ready running
mpathf (1VMware_VITDEVIDb844b861afb20fda3e455cb9018f9fd8) dm-15 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:3 sdl 8:176 active ready running
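
The WWID of an individual path device can also be read directly with scsi_id, which is handy for cross-checking the bindings file (sdc below is just one of the paths listed above; on some distributions the binary lives under /lib/udev instead):

# /usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sdc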

6. Configure the multipath.conf file

# vim /etc/multipath.conf

multipaths {
	multipath {
		wwid 1VMware_VITDEVIDa644b861306d2fc75d4c5cb9018f9f34
		alias FRA01_1
	}
	multipath {
		wwid 1VMware_VITDEVIDb844b861afb20fda3e455cb9018f9fd8
		alias FRA02_1
	}
	multipath {
		wwid 1VMware_VITDEVIDd343b861ad1d078f28ab5cb9018f9fd8
		alias DATA01_1
	}
	multipath {
		wwid 1VMware_VITDEVID5a44b861d4956f0fcad65cb9018f9f34 
		alias DATA02_1
	}
	multipath {
		wwid 1VMware_VITDEVID4445b86124db439f17cd5cb9018f9c04
		alias OCR01_1
	}
	multipath {
		wwid 1VMware_VITDEVID6e45b861d1723d02371b5cb9018f9fd8 
		alias OCR02_1
	}
	multipath {
		wwid 1VMware_VITDEVID7593ba61198cc8b778b45cb9018f9fd8
		alias DATA01_2
	}
	multipath {
		wwid 1VMware_VITDEVID8193ba61836cdd0166445cb9018f9fd8
		alias DATA02_2
	}
	multipath {
		wwid 1VMware_VITDEVID8f93ba613b94159ebfc05cb9018f9fd8
		alias FRA01_2
	}
	multipath {
		wwid 1VMware_VITDEVID9d93ba613e0cd453b9915cb9018f9f34
		alias FRA02_2
	}
	multipath {
		wwid 1VMware_VITDEVIDf993ba61bb9a711d93df5cb9018f9f10
		alias OCR01_2
	}
	multipath {
		wwid 1VMware_VITDEVID0994ba61f501edaac8c55cb9018f9c04
		alias OCR02_2
	}
}
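
If a full service restart is undesirable, multipathd can also be asked to re-read the configuration through its interactive interface; the restart in the next step achieves the same result:

# multipathd -k"reconfigure"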

7. Restart the service and verify that the new alias names are in effect

# systemctl restart multipathd.service
# multipath -ll

OCR01_2 (1VMware_VITDEVIDf993ba61bb9a711d93df5cb9018f9f10) dm-15 VMware  ,Virtual SAN     
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:4 sdm 8:192 active ready running
OCR02_2 (1VMware_VITDEVID0994ba61f501edaac8c55cb9018f9c04) dm-17 VMware  ,Virtual SAN     
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:5 sdo 8:224 active ready running
OCR01_1 (1VMware_VITDEVID4445b86124db439f17cd5cb9018f9c04) dm-13 VMware  ,Virtual SAN     
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:4 sdl 8:176 active ready running
OCR02_1 (1VMware_VITDEVID6e45b861d1723d02371b5cb9018f9fd8) dm-16 VMware  ,Virtual SAN     
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:5 sdn 8:208 active ready running
DATA01_2 (1VMware_VITDEVID7593ba61198cc8b778b45cb9018f9fd8) dm-6 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:0 sdd 8:48  active ready running
DATA02_2 (1VMware_VITDEVID8193ba61836cdd0166445cb9018f9fd8) dm-9 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:1 sdf 8:80  active ready running
DATA01_1 (1VMware_VITDEVIDd343b861ad1d078f28ab5cb9018f9fd8) dm-7 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:0 sdc 8:32  active ready running
DATA02_1 (1VMware_VITDEVID5a44b861d4956f0fcad65cb9018f9f34) dm-8 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:1 sdg 8:96  active ready running
FRA01_2 (1VMware_VITDEVID8f93ba613b94159ebfc05cb9018f9fd8) dm-11 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:2 sdi 8:128 active ready running
FRA02_2 (1VMware_VITDEVID9d93ba613e0cd453b9915cb9018f9f34) dm-14 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 34:0:0:3 sdk 8:160 active ready running
FRA01_1 (1VMware_VITDEVIDa644b861306d2fc75d4c5cb9018f9f34) dm-10 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:2 sdh 8:112 active ready running
FRA02_1 (1VMware_VITDEVIDb844b861afb20fda3e455cb9018f9fd8) dm-12 VMware  ,Virtual SAN     
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 33:0:0:3 sdj 8:144 active ready running
# ls -ltr /dev/mapper/

total 0
lrwxrwxrwx. 1 root root       8 Feb 21 17:42 FRA02_1 -> ../dm-12
lrwxrwxrwx. 1 root root       8 Feb 21 17:42 OCR02_2 -> ../dm-17
lrwxrwxrwx. 1 root root       8 Feb 21 17:42 OCR01_1 -> ../dm-13
lrwxrwxrwx. 1 root root       8 Feb 21 17:42 FRA02_2 -> ../dm-14
lrwxrwxrwx. 1 root root       8 Feb 21 17:42 OCR01_2 -> ../dm-15
lrwxrwxrwx. 1 root root       7 Feb 21 17:42 DATA01_1 -> ../dm-7
lrwxrwxrwx. 1 root root       8 Feb 21 17:42 OCR02_1 -> ../dm-16
lrwxrwxrwx. 1 root root       7 Feb 21 17:42 DATA02_1 -> ../dm-8
lrwxrwxrwx. 1 root root       7 Feb 21 17:42 DATA02_2 -> ../dm-9
lrwxrwxrwx. 1 root root       8 Feb 21 17:42 FRA01_1 -> ../dm-10
lrwxrwxrwx. 1 root root       8 Feb 21 17:42 FRA01_2 -> ../dm-11
lrwxrwxrwx. 1 root root       7 Feb 21 17:42 DATA01_2 -> ../dm-6

8. Create the ASM udev rules file under /etc/udev/rules.d (the file extension must be .rules, otherwise udev will not pick it up)

# vim /etc/udev/rules.d/99-oracle-asmdevices.rules

ENV{DM_UUID}=="mpath-1VMware_VITDEVIDa644b861306d2fc75d4c5cb9018f9f34", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/FRA01_1"
ENV{DM_UUID}=="mpath-1VMware_VITDEVIDb844b861afb20fda3e455cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/FRA02_1"
ENV{DM_UUID}=="mpath-1VMware_VITDEVIDd343b861ad1d078f28ab5cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/DATA01_1"
ENV{DM_UUID}=="mpath-1VMware_VITDEVID5a44b861d4956f0fcad65cb9018f9f34", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/DATA02_1"
ENV{DM_UUID}=="mpath-1VMware_VITDEVID4445b86124db439f17cd5cb9018f9c04", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/OCR01_1"
ENV{DM_UUID}=="mpath-1VMware_VITDEVID6e45b861d1723d02371b5cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/OCR02_1"
ENV{DM_UUID}=="mpath-1VMware_VITDEVID7593ba61198cc8b778b45cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/DATA01_2"
ENV{DM_UUID}=="mpath-1VMware_VITDEVID8193ba61836cdd0166445cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/DATA02_2"
ENV{DM_UUID}=="mpath-1VMware_VITDEVID8f93ba613b94159ebfc05cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/FRA01_2"
ENV{DM_UUID}=="mpath-1VMware_VITDEVID9d93ba613e0cd453b9915cb9018f9f34", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/FRA02_2"
ENV{DM_UUID}=="mpath-1VMware_VITDEVIDf993ba61bb9a711d93df5cb9018f9f10", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/OCR01_2"
ENV{DM_UUID}=="mpath-1VMware_VITDEVID0994ba61f501edaac8c55cb9018f9c04", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/OCR02_2"
KERNEL=="sd*", ENV{ID_SERIAL}=="36001405d4c80f6610234761a654d3302", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="oracleasm/OCR01_3"
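
The DM_UUID value that each rule must match can be confirmed per device with udevadm before reloading; dm-10 is simply one of the maps from the listing above:

# udevadm info --query=property --name=/dev/dm-10 | grep -E 'DM_UUID|DM_NAME'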

9. Reload the udev rules

# /sbin/udevadm control --reload-rules
# /sbin/udevadm trigger
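
Optionally, the trigger can be limited to block devices so that unrelated subsystems are not re-processed:

# /sbin/udevadm trigger --subsystem-match=block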

10. Verify that the rules have taken effect

# ls -l /dev/oracleasm/
total 0
lrwxrwxrwx. 1 root root 7 Feb 24 16:02 DATA01_1 -> ../dm-6
lrwxrwxrwx. 1 root root 7 Feb 24 16:02 DATA01_2 -> ../dm-7
lrwxrwxrwx. 1 root root 7 Feb 24 16:02 DATA02_1 -> ../dm-8
lrwxrwxrwx. 1 root root 7 Feb 24 16:02 DATA02_2 -> ../dm-9
lrwxrwxrwx. 1 root root 8 Feb 24 16:02 FRA01_1 -> ../dm-10
lrwxrwxrwx. 1 root root 8 Feb 24 16:02 FRA01_2 -> ../dm-11
lrwxrwxrwx. 1 root root 8 Feb 24 16:02 FRA02_1 -> ../dm-13
lrwxrwxrwx. 1 root root 8 Feb 24 16:02 FRA02_2 -> ../dm-12
lrwxrwxrwx. 1 root root 8 Feb 24 16:02 OCR01_1 -> ../dm-15
lrwxrwxrwx. 1 root root 8 Feb 24 16:02 OCR01_2 -> ../dm-14
lrwxrwxrwx. 1 root root 6 Feb 24 16:05 OCR01_3 -> ../sdo
lrwxrwxrwx. 1 root root 8 Feb 24 16:05 OCR02_1 -> ../dm-17
lrwxrwxrwx. 1 root root 8 Feb 24 16:05 OCR02_2 -> ../dm-16

# ls -l /dev/dm-*
brw-rw----. 1 root disk     253,  0 Feb 22 10:09 /dev/dm-0
brw-rw----. 1 root disk     253,  1 Feb 22 10:09 /dev/dm-1
brw-rw----. 1 grid asmadmin 253, 10 Feb 22 10:09 /dev/dm-10
brw-rw----. 1 grid asmadmin 253, 11 Feb 22 10:09 /dev/dm-11
brw-rw----. 1 grid asmadmin 253, 12 Feb 22 10:09 /dev/dm-12
brw-rw----. 1 grid asmadmin 253, 13 Feb 22 10:09 /dev/dm-13
brw-rw----. 1 grid asmadmin 253, 14 Feb 22 10:09 /dev/dm-14
brw-rw----. 1 grid asmadmin 253, 15 Feb 22 10:09 /dev/dm-15
brw-rw----. 1 grid asmadmin 253, 16 Feb 22 10:09 /dev/dm-16
brw-rw----. 1 grid asmadmin 253, 17 Feb 22 10:09 /dev/dm-17
brw-rw----. 1 root disk     253,  2 Feb 22 10:09 /dev/dm-2
brw-rw----. 1 root disk     253,  3 Feb 22 10:09 /dev/dm-3
brw-rw----. 1 root disk     253,  4 Feb 22 10:09 /dev/dm-4
brw-rw----. 1 root disk     253,  5 Feb 22 10:09 /dev/dm-5
brw-rw----. 1 grid asmadmin 253,  6 Feb 22 10:09 /dev/dm-6
brw-rw----. 1 grid asmadmin 253,  7 Feb 22 10:09 /dev/dm-7
brw-rw----. 1 grid asmadmin 253,  8 Feb 22 10:09 /dev/dm-8
brw-rw----. 1 grid asmadmin 253,  9 Feb 22 10:09 /dev/dm-9
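
Note that the symlinks under /dev/oracleasm/ stay owned by root; what matters is the ownership of the dm devices they point to, which can be checked in one step by dereferencing the links:

# ls -lL /dev/oracleasm/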

11. Copy multipath.conf to the other node(s) and restart the multipathd service

# scp /etc/multipath.conf root@oraext2:/etc/
# ssh oraext2 systemctl restart multipathd.service
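
After the restart, the aliases on the other node can be verified remotely as well (oraext2 is the second node used throughout this example):

# ssh oraext2 multipath -ll | grep -E 'DATA|FRA|OCR'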

12. Copy 99-oracle-asmdevices.rules to the other node(s) and reload udev

# scp /etc/udev/rules.d/99-oracle-asmdevices.rules oraext2:/etc/udev/rules.d/
# ssh oraext2 udevadm control --reload-rules
# ssh oraext2 udevadm trigger
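
And check the resulting device links on the remote node:

# ssh oraext2 ls -l /dev/oracleasm/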

13. During the Grid Infrastructure installation, the ASM discovery string (asm_diskstring) must be set to: /dev/oracleasm/*
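
If the discovery string needs to be changed on an ASM instance that is already running (assuming an spfile is in use), it can also be set from SQL*Plus as SYSASM, for example:

SQL> ALTER SYSTEM SET asm_diskstring = '/dev/oracleasm/*' SCOPE=BOTH;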



Note: querying device information

# udevadm info --query=all --name=/dev/sdo

P: /devices/platform/host39/session7/target39:0:0/39:0:0:0/block/sdo
N: sdo
S: disk/by-id/scsi-36001405d4c80f6610234761a654d3302
S: disk/by-id/wwn-0x6001405d4c80f6610234761a654d3302
S: disk/by-path/ip-172.30.2.15:3260-iscsi-iqn.2003-01.org.linux-iscsi.oraquo.x8664:sn.5a66575e26b6-lun-0
S: oracleasm/OCR01_3
E: DEVLINKS=/dev/disk/by-id/scsi-36001405d4c80f6610234761a654d3302 /dev/disk/by-id/wwn-0x6001405d4c80f6610234761a654d3302 /dev/disk/by-path/ip-172.30.2.15:3260-iscsi-iqn.2003-01.org.linux-iscsi.oraquo.x8664:sn.5a66575e26b6-lun-0 /dev/oracleasm/OCR01_3
E: DEVNAME=/dev/sdo
E: DEVPATH=/devices/platform/host39/session7/target39:0:0/39:0:0:0/block/sdo
E: DEVTYPE=disk
E: DM_MULTIPATH_TIMESTAMP=1645670214
E: ID_BUS=scsi
E: ID_MODEL=oraquo-disk
E: ID_MODEL_ENC=oraquo-disk\x20\x20\x20\x20\x20
E: ID_PATH=ip-172.30.2.15:3260-iscsi-iqn.2003-01.org.linux-iscsi.oraquo.x8664:sn.5a66575e26b6-lun-0
E: ID_PATH_TAG=ip-172_30_2_15_3260-iscsi-iqn_2003-01_org_linux-iscsi_oraquo_x8664_sn_5a66575e26b6-lun-0
E: ID_REVISION=4.0
E: ID_SCSI=1
E: ID_SCSI_SERIAL=d4c80f66-1023-4761-a654-d33023f2967f
E: ID_SERIAL=36001405d4c80f6610234761a654d3302
E: ID_SERIAL_SHORT=6001405d4c80f6610234761a654d3302
E: ID_TARGET_PORT=0
E: ID_TYPE=disk
E: ID_VENDOR=LIO-ORG
E: ID_VENDOR_ENC=LIO-ORG\x20
E: ID_WWN=0x6001405d4c80f661
E: ID_WWN_VENDOR_EXTENSION=0x0234761a654d3302
E: ID_WWN_WITH_EXTENSION=0x6001405d4c80f6610234761a654d3302
E: MAJOR=8
E: MINOR=224
E: MPATH_SBIN_PATH=/sbin
E: SUBSYSTEM=block
E: TAGS=:systemd:
E: USEC_INITIALIZED=1557568554
