Common iSCSI commands (the iSCSI storage here is provided by openfiler)
# iscsiadm -m discovery -t st -p IP:port //discover iSCSI targets
# iscsiadm -m node -o delete -T TARGET -p IP:port //delete an iSCSI discovery record
# iscsiadm -m node //list iSCSI discovery records
# iscsiadm -m session //show session status
# iscsiadm -m node -T TARGET -p IP:port -l //log in to the iSCSI target
# iscsiadm -m node -T TARGET -p IP:port -u //log out of the iSCSI target
vim /etc/iscsi/initiatorname.iscsi //set the client initiator name used for authentication
InitiatorName=iqn.2018-12.com.oven:client //must match the name configured in the target's ACL
systemctl restart iscsid //restart the daemon so the new IQN takes effect
iscsiadm -m node -T iqn.2018-12.com.oven:master -p 192.168.4.10 -l //login succeeds after the IQN update
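A quick way to confirm the login actually produced block devices (a sketch; which sdX names appear depends on your LUN layout):
# iscsiadm -m session -P 3 | grep "Attached scsi disk" //shows the sdX device attached by each session
# lsblk -S //lists SCSI devices; the new LUNs show up with an iscsi transport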
[root@rhel1 ~]# fdisk /dev/sde --partition the disk
[root@rhel1 ~]# udevadm test /sys/block/sde ----view device information
Configure udev to give the iSCSI disk partitions persistent names
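The RESULT strings in the rules below are the SCSI WWIDs of the individual LUNs; they can be read ahead of time with scsi_id (a sketch, using /dev/sdb as a stand-in for each of your iSCSI disks):
# /usr/lib/udev/scsi_id -g -u /dev/sdb //prints the WWID, e.g. 14f504e46494c455248704e5673662d486e38762d505a7470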
[root@rhel1 ~]# vi /etc/udev/rules.d/99-openiscsi.rules ---->redhat7
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="14f504e46494c455248704e5673662d486e38762d505a7470", SYMLINK+="asm/asm_ocr3/part%n",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="14f504e46494c45524a59386b4a412d39354f472d35776769", SYMLINK+="asm/asm_fra/part%n",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="14f504e46494c455273656f6353492d3762575a2d69475577",SYMLINK+="asm/asm_dbfile/part%n",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="14f504e46494c455233326755744d2d544a586f2d4234696e", SYMLINK+="asm/asm_ocr1/part%n",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="14f504e46494c45524d79356d34542d354b456f2d326b3877", SYMLINK+="asm/asm_ocr2/part%n",OWNER="grid",GROUP="asmadmin",MODE="0660"
[root@rhel1 ~]# udevadm control --reload-rules
[root@rhel1 ~]# udevadm trigger
[root@rhel1 asm]# tree
├── asm_dbfile
│ └── part1 -> ../../sdf1
├── asm_fra
│ └── part1 -> ../../sde1
├── asm_ocr1
│ └── part1 -> ../../sdc1
├── asm_ocr2
│ └── part1 -> ../../sdb1
└── asm_ocr3
└── part1 -> ../../sdd1
5 directories, 5 files
ASM configuration and installation
[root@rhel1 ~]# yum install kmod-oracleasm.x86_64
[root@rhel1 ~]# rpm -ivh /soft/oracleasmlib-2.0.12-1.el7.x86_64.rpm
[root@rhel1 ~]# rpm -ivh /soft/oracleasm-support-2.1.11-2.el7.x86_64.rpm
[root@rhel1 ~]# /etc/init.d/oracleasm configure
/etc/init.d/oracleasm is deprecated. Use 'oracleasm configure -i'
[root@rhel1 ~]# oracleasm configure -i ---- must be run on both nodes
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
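Before creating disks it is worth making sure the driver is actually loaded; oracleasm ships init and status subcommands for that:
[root@rhel1 ~]# oracleasm init //loads the oracleasm kernel module and mounts /dev/oracleasm
[root@rhel1 ~]# oracleasm status //reports whether the driver is loaded and the filesystem mounted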
[root@rhel1 asm]# oracleasm createdisk DBFILES /dev/asm/asm_dbfile/part1
Writing disk header: done
Instantiating disk: done
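The remaining four disks would be labeled the same way (a sketch, assuming the udev symlinks defined above; the names match what listdisks reports below):
[root@rhel1 asm]# oracleasm createdisk FRAS /dev/asm/asm_fra/part1
[root@rhel1 asm]# oracleasm createdisk OCRDISK1 /dev/asm/asm_ocr1/part1
[root@rhel1 asm]# oracleasm createdisk OCRDISK2 /dev/asm/asm_ocr2/part1
[root@rhel1 asm]# oracleasm createdisk OCRDISK3 /dev/asm/asm_ocr3/part1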
[root@rhel2 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "OCRDISK3"
Instantiating disk "FRAS"
Instantiating disk "DBFILES"
Instantiating disk "OCRDISK1"
Instantiating disk "OCRDISK2"
[root@rhel2 ~]# oracleasm listdisks
DBFILES
FRAS
OCRDISK1
OCRDISK2
OCRDISK3
[root@rhel2 ~]# oracleasm querydisk -p OCRDISK1 -----query which device file an ASM disk maps to
Kernel parameter settings:
[root@rhel1 ~]# vi /etc/sysctl.conf
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = # depends on physical RAM; see the sizing sketch below
kernel.shmmax = # depends on physical RAM; see the sizing sketch below
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
Run sysctl -p to apply the settings.
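kernel.shmall and kernel.shmmax were left blank above because they depend on the machine's RAM. A sketch of the common sizing rule (assumption: shmmax = half of physical memory in bytes, shmall = shmmax expressed in pages):
MEM_BYTES=$(awk '/MemTotal/ {print $2 * 1024}' /proc/meminfo)
PAGE_SIZE=$(getconf PAGE_SIZE)
echo "kernel.shmmax = $((MEM_BYTES / 2))"
echo "kernel.shmall = $((MEM_BYTES / 2 / PAGE_SIZE))"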
Configure shell limits for the oracle and grid users
[root@rhel1 ~]# vi /etc/security/limits.conf
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
[root@rhel1 ~]# vi /etc/pam.d/login
session required /lib64/security/pam_limits.so
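pam_limits only applies at login, so the new values can be checked from a fresh session (the same check works for oracle):
[root@rhel1 ~]# su - grid -c 'ulimit -n; ulimit -Hn; ulimit -u; ulimit -Hu' //should print 1024, 65536, 2047, 16384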
Add users and groups
[root@rhel1 ~]# groupadd -g 1001 oinstall
[root@rhel1 ~]# groupadd -g 1002 asmadmin
[root@rhel1 ~]# groupadd -g 1003 dba
[root@rhel1 ~]# groupadd -g 1004 oper
[root@rhel1 ~]# groupadd -g 1005 asmdba
[root@rhel1 ~]# groupadd -g 1006 asmoper
[root@rhel1 ~]# useradd -u 1002 -g oinstall -G asmadmin,asmdba,asmoper grid
[root@rhel1 ~]# usermod -g oinstall -G dba,oper,asmdba oracle
[root@rhel1 ~]# mkdir -p /u01/app/11.2.0/grid
[root@rhel1 ~]# chown -R grid:oinstall /u01
[root@rhel1 ~]# mkdir /u01/app/oracle
[root@rhel1 ~]# chown -R oracle:oinstall /u01/app/oracle/
[root@rhel1 ~]# chmod -R 775 /u01/
DNS configuration
[root@rhel2 ~]# yum install bind bind-utils //named is in the bind package; nslookup is in bind-utils
[root@rhel2 ~]# vi /etc/named.conf ----add the following content
zone "86.168.192.in-addr.arpa" IN{
type master;
file "192.168.86.db";
};
zone "example.com." IN {
type master;
file "example.com.db";
};
[root@rhel2 ~]# vi /var/named/example.com.db
$TTL 1H
@ IN SOA homeserver.localdomain. root.homeserver.localdomain. (
5 ; serial
3H ; refresh
1H ; retry
1W ; expire
1H ) ; minimum
NS homeserver.localdomain.
rhel-cluster-scan.grid IN A 192.168.86.170
rhel-cluster-scan.grid IN A 192.168.86.171
rhel-cluster-scan.grid IN A 192.168.86.172
[root@rhel2 ~]# vi /var/named/192.168.86.db
$TTL 1H
@ IN SOA homeserver.localdomain. root.homeserver.localdomain. (
2 ; serial
3H ; refresh
1H ; retry
1W ; expire
1H ) ; minimum
NS homeserver.localdomain.
170 IN PTR rhel-cluster-scan.grid.example.com.
171 IN PTR rhel-cluster-scan.grid.example.com.
172 IN PTR rhel-cluster-scan.grid.example.com.
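Before pointing clients at the server, the config and both zone files can be validated with the checkers that ship with bind, then the service started:
[root@rhel2 ~]# named-checkconf /etc/named.conf
[root@rhel2 ~]# named-checkzone example.com /var/named/example.com.db
[root@rhel2 ~]# named-checkzone 86.168.192.in-addr.arpa /var/named/192.168.86.db
[root@rhel2 ~]# systemctl restart named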
[root@rhel2 ~]# vi /etc/resolv.conf
nameserver 192.168.86.152
[root@rhel2 ~]# vi /etc/nsswitch.conf -----append nis to the "hosts: files dns" line
hosts: files dns nis
[root@rhel2 ~]# nslookup rhel-cluster-scan.grid.example.com
Server: 192.168.86.152
Address: 192.168.86.152#53
Name: rhel-cluster-scan.grid.example.com
Address: 192.168.86.172
Name: rhel-cluster-scan.grid.example.com
Address: 192.168.86.170
Name: rhel-cluster-scan.grid.example.com
Address: 192.168.86.171
[grid@rhel1 ~]$ /u01/app/grid/runcluvfy.sh stage -pre crsinst -n rhel1,rhel2 -fixup -verbose ------run the pre-installation checks
Configure SSH mutual trust (required on every node)
[oracle@rhel1 ~]$
ssh-keygen -t rsa
ssh-keygen -t dsa
[oracle@rhel1 ~]$
ssh rhel1 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
ssh rhel2 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
ssh rhel1 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
ssh rhel2 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
[oracle@rhel1 ~]$ scp ~/.ssh/authorized_keys rhel2:~/.ssh/
[oracle@rhel2 ~]$ chmod 600 ~/.ssh/authorized_keys
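A round-trip test confirms passwordless login works in both directions (run once interactively so host keys get accepted; repeat as grid):
[oracle@rhel1 ~]$ for h in rhel1 rhel2; do ssh $h date; done //two timestamps, no password prompts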
Errors when running root.sh
Adding daemon to inittab
CRS-4124: Oracle High Availability Services startup failed.
CRS-4000: Command Start failed, or completed with errors.
ohasd failed to start: Inappropriate ioctl for device
ohasd failed to start at /u01/app/11.2.0/grid/crs/install/rootcrs.pl line 443.
[root@rhel1 ~]# /u01/app/11.2.0/grid/crs/install/roothas.pl -deconfig -force -verbose
When "Adding daemon to inittab" appears, run the following from a second session (it opens the named pipe ohasd is blocking on):
[root@rhel1 ~]# dd if=/var/tmp/.oracle/npohasd of=/dev/null bs=1024 count=1 (rhel7+11.2.0.1)
[root@rhel1 ~]# /u01/app/11.2.0/grid/root.sh ---->wait for the previous node to finish before running on the next
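Once root.sh has completed on both nodes, the stack can be verified with the standard crsctl checks:
[root@rhel1 ~]# /u01/app/11.2.0/grid/bin/crsctl check crs //CRS, CSS and EVM should all report online
[root@rhel1 ~]# /u01/app/11.2.0/grid/bin/crsctl stat res -t //tabular status of cluster resources on both nodes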