469 lines
15 KiB
Plaintext
469 lines
15 KiB
Plaintext
# DNS config
|
|
############
|
|
|
|
# config file swgalaxy.zone
|
|
|
|
ylesia-db01 IN A 192.168.0.114
|
|
ylesia-db01-vip IN A 192.168.0.115
|
|
ylesia-db01-priv IN A 192.168.1.114
|
|
ylesia-db01-asm IN A 192.168.2.114
|
|
|
|
ylesia-db02 IN A 192.168.0.116
|
|
ylesia-db02-vip IN A 192.168.0.117
|
|
ylesia-db02-priv IN A 192.168.1.116
|
|
ylesia-db02-asm IN A 192.168.2.116
|
|
|
|
ylesia-scan IN A 192.168.0.108
|
|
ylesia-scan IN A 192.168.0.109
|
|
ylesia-scan IN A 192.168.0.110
|
|
|
|
rodia-db01 IN A 192.168.0.93
|
|
rodia-db01-vip IN A 192.168.0.95
|
|
rodia-db01-priv IN A 192.168.1.93
|
|
rodia-db01-asm IN A 192.168.2.93
|
|
|
|
rodia-db02 IN A 192.168.0.94
|
|
rodia-db02-vip IN A 192.168.0.96
|
|
rodia-db02-priv IN A 192.168.1.94
|
|
rodia-db02-asm IN A 192.168.2.94
|
|
|
|
rodia-scan IN A 192.168.0.97
|
|
rodia-scan IN A 192.168.0.98
|
|
rodia-scan IN A 192.168.0.99
|
|
|
|
# config file 0.168.192.in-addr.arpa
|
|
|
|
114 IN PTR ylesia-db01.swgalaxy.
116 IN PTR ylesia-db02.swgalaxy.
115 IN PTR ylesia-db01-vip.swgalaxy.
117 IN PTR ylesia-db02-vip.swgalaxy.

108 IN PTR ylesia-scan.swgalaxy.
109 IN PTR ylesia-scan.swgalaxy.
110 IN PTR ylesia-scan.swgalaxy.
|
|
|
|
93 IN PTR rodia-db01.swgalaxy.
|
|
94 IN PTR rodia-db02.swgalaxy.
|
|
95 IN PTR rodia-db01-vip.swgalaxy.
|
|
96 IN PTR rodia-db02-vip.swgalaxy.
|
|
|
|
97 IN PTR rodia-scan.swgalaxy.
|
|
98 IN PTR rodia-scan.swgalaxy.
|
|
99 IN PTR rodia-scan.swgalaxy.
|
|
|
|
|
|
qemu-img create -f raw /vm/hdd0/ylesia-rac/ylesia-db01/boot_01.img 4G
|
|
qemu-img create -f raw /vm/hdd0/ylesia-rac/ylesia-db01/root_01.img 30G
|
|
qemu-img create -f raw /vm/hdd0/ylesia-rac/ylesia-db01/swap_01.img 20G
|
|
qemu-img create -f raw /vm/hdd0/ylesia-rac/ylesia-db01/app_01.img 60G
|
|
|
|
|
|
# get os-variant as Short ID from OS info database
|
|
osinfo-query os | grep -i oracle | sort
|
|
|
|
virt-install \
|
|
--graphics vnc,password=secret,listen=0.0.0.0 \
|
|
--name=ylesia-db01 \
|
|
--vcpus=4 \
|
|
--memory=40960 \
|
|
--network bridge=br0 \
|
|
--network bridge=br0 \
|
|
--network bridge=br0 \
|
|
--cdrom=/mnt/yavin4/kit/Oracle/OEL8/OracleLinux-R8-U7-x86_64-dvd.iso \
|
|
--disk /vm/hdd0/ylesia-rac/ylesia-db01/boot_01.img \
|
|
--disk /vm/hdd0/ylesia-rac/ylesia-db01/root_01.img \
|
|
--disk /vm/hdd0/ylesia-rac/ylesia-db01/swap_01.img \
|
|
--disk /vm/hdd0/ylesia-rac/ylesia-db01/app_01.img \
|
|
--os-variant=ol8.5
|
|
|
|
|
|
# on host install packages
|
|
dnf install bind-utils
|
|
dnf install zip.x86_64 unzip.x86_64 gzip.x86_64
|
|
dnf install pigz.x86_64
|
|
dnf install net-tools.x86_64
|
|
dnf install oracle-database-preinstall-19c.x86_64
|
|
dnf install oracle-database-preinstall-21c.x86_64
|
|
dnf install unixODBC
|
|
dnf install wget
|
|
dnf install lsof.x86_64
|
|
|
|
|
|
# Enable EPEL Repository on Oracle Linux 8
|
|
tee /etc/yum.repos.d/ol8-epel.repo<<EOF
|
|
[ol8_developer_EPEL]
|
|
name= Oracle Linux \$releasever EPEL (\$basearch)
|
|
baseurl=https://yum.oracle.com/repo/OracleLinux/OL8/developer/EPEL/\$basearch/
|
|
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
|
|
gpgcheck=1
|
|
enabled=1
|
|
EOF
|
|
|
|
dnf makecache
|
|
|
|
# Install rlwrap
|
|
dnf install rlwrap.x86_64
|
|
|
|
# disable firewall
|
|
systemctl status firewalld
|
|
systemctl stop firewalld
|
|
systemctl disable firewalld
|
|
|
|
|
|
# disable selinux
|
|
getenforce
|
|
# update /etc/selinux/config
|
|
# restart the server and check if it is disabled
|
|
getenforce
|
|
|
|
|
|
# grid infrastructure users and groups
|
|
groupadd -g 54327 asmoper
|
|
groupadd -g 54328 asmdba
|
|
groupadd -g 54329 asmadmin
|
|
|
|
useradd -g oinstall -G asmoper,asmdba,asmadmin -c "Grid Infrastructure Owner" grid
|
|
usermod -g oinstall -G asmdba,dba,oper -c "Oracle Software Owner" oracle
|
|
|
|
|
|
# install ASMLib
|
|
# see Metalink note: Oracle Linux 8: How To Install ASMLib (Doc ID 2720215.1)
|
|
|
|
# that will install oracleasm-support & oracleasmlib
|
|
cd /tmp
|
|
wget https://download.oracle.com/otn_software/asmlib/oracleasmlib-2.0.17-1.el8.x86_64.rpm
|
|
wget https://public-yum.oracle.com/repo/OracleLinux/OL8/addons/x86_64/getPackage/oracleasm-support-2.1.12-1.el8.x86_64.rpm
|
|
dnf localinstall ./oracleasm-support-2.1.12-1.el8.x86_64.rpm ./oracleasmlib-2.0.17-1.el8.x86_64.rpm
|
|
|
|
|
|
# in Dom0, create virtual disk for ASM
|
|
dd if=/dev/zero of=/vm/ssd0/ylesia-rac/disk-array/asm_data_01.img bs=1G count=30
|
|
dd if=/dev/zero of=/vm/ssd0/ylesia-rac/disk-array/asm_data_02.img bs=1G count=30
|
|
dd if=/dev/zero of=/vm/ssd0/ylesia-rac/disk-array/asm_data_03.img bs=1G count=30
|
|
dd if=/dev/zero of=/vm/ssd0/ylesia-rac/disk-array/asm_data_04.img bs=1G count=30
|
|
dd if=/dev/zero of=/vm/ssd0/ylesia-rac/disk-array/asm_data_05.img bs=1G count=30
|
|
|
|
dd if=/dev/zero of=/vm/hdd0/ylesia-rac/disk-array/asm_reco_01.img bs=1G count=20
|
|
dd if=/dev/zero of=/vm/hdd0/ylesia-rac/disk-array/asm_reco_02.img bs=1G count=20
|
|
dd if=/dev/zero of=/vm/hdd0/ylesia-rac/disk-array/asm_reco_03.img bs=1G count=20
|
|
dd if=/dev/zero of=/vm/hdd0/ylesia-rac/disk-array/asm_reco_04.img bs=1G count=20
|
|
|
|
|
|
# list the block devices of VM
|
|
virsh domblklist ylesia-db01 --details
|
|
|
|
# attach disks to the VM (with VM stopped for more than 1 disk to attach, I don't know why)
|
|
# vdX device names will be renamed automatically at VM start in order not to have gaps
|
|
|
|
virsh attach-disk ylesia-db01 --source /vm/ssd0/ylesia-rac/disk-array/asm_data_01.img --target vdi --persistent
|
|
virsh attach-disk ylesia-db01 --source /vm/ssd0/ylesia-rac/disk-array/asm_data_02.img --target vdj --persistent
|
|
virsh attach-disk ylesia-db01 --source /vm/ssd0/ylesia-rac/disk-array/asm_data_03.img --target vdk --persistent
|
|
virsh attach-disk ylesia-db01 --source /vm/ssd0/ylesia-rac/disk-array/asm_data_04.img --target vdl --persistent
|
|
virsh attach-disk ylesia-db01 --source /vm/ssd0/ylesia-rac/disk-array/asm_data_05.img --target vdm --persistent
|
|
|
|
virsh attach-disk ylesia-db01 --source /vm/hdd0/ylesia-rac/disk-array/asm_reco_01.img --target vdn --persistent
|
|
virsh attach-disk ylesia-db01 --source /vm/hdd0/ylesia-rac/disk-array/asm_reco_02.img --target vdo --persistent
|
|
virsh attach-disk ylesia-db01 --source /vm/hdd0/ylesia-rac/disk-array/asm_reco_03.img --target vdp --persistent
|
|
virsh attach-disk ylesia-db01 --source /vm/hdd0/ylesia-rac/disk-array/asm_reco_04.img --target vdr --persistent
|
|
|
|
|
|
# edit VM xml config file and add to disk array disk's keyword: <shareable/>
|
|
|
|
oracleasm configure -i
|
|
# choose grid for user and asmdba for group
|
|
oracleasm init
|
|
|
|
# if we need to use an older kernel prior to the last kernel update
|
|
# https://www.golinuxcloud.com/change-default-kernel-version-rhel-centos-8/
|
|
|
|
|
|
# create ASM disks
|
|
oracleasm status
|
|
oracleasm scandisks
|
|
oracleasm listdisks
|
|
|
|
# list block devices
|
|
lsblk
|
|
|
|
# use following shell script to create all new partitions
|
|
|
|
---------------------------------------------------------------------------------------
|
|
#!/bin/sh
# Create one primary partition spanning each whole ASM disk, non-interactively.
# The printf list feeds fdisk its interactive answers in order:
#   n = new partition, p = primary, 1 = partition number,
#   two empty lines = accept default first/last sector, w = write table.
hdd="/dev/vde /dev/vdf /dev/vdg /dev/vdh /dev/vdi /dev/vdj /dev/vdk /dev/vdl /dev/vdm"
for i in $hdd; do   # $hdd intentionally unquoted: word-splitting yields one device per iteration
  printf '%s\n' n p 1 '' '' w | fdisk "$i"
done
|
|
---------------------------------------------------------------------------------------
|
|
|
|
# if ASMLib is used
|
|
oracleasm createdisk DATA_01 /dev/vde1
|
|
oracleasm createdisk DATA_02 /dev/vdf1
|
|
oracleasm createdisk DATA_03 /dev/vdg1
|
|
oracleasm createdisk DATA_04 /dev/vdh1
|
|
oracleasm createdisk DATA_05 /dev/vdi1
|
|
|
|
oracleasm createdisk RECO_01 /dev/vdj1
|
|
oracleasm createdisk RECO_02 /dev/vdk1
|
|
oracleasm createdisk RECO_03 /dev/vdl1
|
|
oracleasm createdisk RECO_04 /dev/vdm1
|
|
|
|
|
|
# without ASMLib
|
|
vi /etc/udev/rules.d/99-oracle-asmdevices.rules
|
|
KERNEL=="vde1",NAME="asm_data_01",OWNER="grid",GROUP="asmadmin",MODE="0660"
|
|
KERNEL=="vdf1",NAME="asm_data_02",OWNER="grid",GROUP="asmadmin",MODE="0660"
|
|
KERNEL=="vdg1",NAME="asm_data_03",OWNER="grid",GROUP="asmadmin",MODE="0660"
|
|
KERNEL=="vdh1",NAME="asm_data_04",OWNER="grid",GROUP="asmadmin",MODE="0660"
|
|
KERNEL=="vdi1",NAME="asm_data_05",OWNER="grid",GROUP="asmadmin",MODE="0660"
|
|
|
|
KERNEL=="vdj1",NAME="asm_reco_01",OWNER="grid",GROUP="asmadmin",MODE="0660"
|
|
KERNEL=="vdk1",NAME="asm_reco_02",OWNER="grid",GROUP="asmadmin",MODE="0660"
|
|
KERNEL=="vdl1",NAME="asm_reco_03",OWNER="grid",GROUP="asmadmin",MODE="0660"
|
|
KERNEL=="vdm1",NAME="asm_reco_04",OWNER="grid",GROUP="asmadmin",MODE="0660"
|
|
|
|
|
|
# at this moment clone the VM
|
|
# on Dom0
|
|
virsh dumpxml ylesia-db01 > /tmp/myvm.xml
|
|
# modify XML file:
|
|
# replace ylesia-db01 by ylesia-db02
|
|
# remove <uuid>...</uuid> line
|
|
# generate new mac addresses for network interfaces
|
|
|
|
date +%s | md5sum | head -c 6 | sed -e 's/\([0-9A-Fa-f]\{2\}\)/\1:/g' -e 's/\(.*\):$/\1/' | sed -e 's/^/52:54:00:/'
|
|
|
|
virsh define /tmp/myvm.xml
|
|
|
|
# start cloned ylesia-db02 VM and change IP addresses and host name
|
|
vi /etc/sysconfig/network-scripts/ifcfg-enp1s0
|
|
vi /etc/sysconfig/network-scripts/ifcfg-enp2s0
|
|
vi /etc/sysconfig/network-scripts/ifcfg-enp3s0
|
|
|
|
hostnamectl set-hostname ylesia-db02.swgalaxy
|
|
|
|
# mount CIFS share on both VM
|
|
dnf install cifs-utils.x86_64
|
|
|
|
groupadd smbuser --gid 1502
|
|
useradd smbuser --uid 1502 -g smbuser -G smbuser
|
|
|
|
mkdir -p /mnt/yavin4
|
|
|
|
# test CIFS mount
|
|
mount -t cifs //192.168.0.9/share /mnt/yavin4 -o vers=2.0,uid=smbuser,gid=smbuser,file_mode=0775,dir_mode=0775,user=vplesnila
|
|
umount /mnt/yavin4
|
|
|
|
# create credentials file for automount: /root/.smbcred
|
|
# username=vplesnila
|
|
# password=*****
|
|
|
|
# add in /etc/fstab
|
|
# //192.168.0.9/share /mnt/yavin4 cifs vers=2.0,uid=smbuser,gid=smbuser,file_mode=0775,dir_mode=0775,credentials=/root/.smbcred 0 0
|
|
|
|
-- mount
|
|
mount -a
|
|
|
|
# oracle user profile
|
|
---------------------------------------------------------------------------------------
|
|
# .bash_profile
|
|
|
|
# Get the aliases and functions
|
|
if [ -f ~/.bashrc ]; then
|
|
. ~/.bashrc
|
|
fi
|
|
|
|
# User specific environment and startup programs
|
|
alias listen='lsof -i -P | grep -i "listen"'
|
|
alias s='rlwrap sqlplus / as sysdba'
|
|
alias r='rlwrap rman target /'
|
|
|
|
PS1='\u@\h[$ORACLE_SID]:$PWD\$ '
|
|
umask 022
|
|
|
|
PATH=$PATH:$HOME/.local/bin:$HOME/bin
|
|
|
|
export PATH
|
|
---------------------------------------------------------------------------------------
|
|
|
|
|
|
# grid user profile
|
|
---------------------------------------------------------------------------------------
|
|
# .bash_profile
|
|
|
|
# Get the aliases and functions
|
|
if [ -f ~/.bashrc ]; then
|
|
. ~/.bashrc
|
|
fi
|
|
|
|
# User specific environment and startup programs
|
|
alias listen='lsof -i -P | grep -i "listen"'
|
|
alias asmcmd='rlwrap asmcmd'
|
|
alias s='rlwrap sqlplus / as sysasm'
|
|
PS1='\u@\h[$ORACLE_SID]:$PWD\$ '
|
|
umask 022
|
|
|
|
GRID_HOME=/app/grid/product/21.3
|
|
ORACLE_SID=+ASM1
|
|
ORACLE_BASE=/app/grid/base
|
|
ORACLE_HOME=$GRID_HOME
|
|
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$ORACLE_HOME/lib
|
|
PATH=$PATH:$HOME/.local/bin:$HOME/bin:$ORACLE_HOME/bin:$ORACLE_HOME/OPatch
|
|
|
|
|
|
export ORACLE_BASE
|
|
export ORACLE_HOME
|
|
export LD_LIBRARY_PATH
|
|
export ORACLE_SID
|
|
export PATH
|
|
---------------------------------------------------------------------------------------
|
|
|
|
# generate SSH keys on both VM and add public keys in .ssh/authorized_keys in order to connect locally and cross connect without password
|
|
ssh-keygen
|
|
cd
|
|
cat .ssh/id_rsa.pub >> .ssh/authorized_keys
|
|
|
|
# as root on both VM
|
|
mkdir -p /app/grid/product/21.3
|
|
mkdir -p /app/grid/base
|
|
mkdir -p /app/grid/oraInventory
|
|
|
|
chown -R grid:oinstall /app/grid/product/21.3
|
|
chown -R grid:oinstall /app/grid/base
|
|
chown -R grid:oinstall /app/grid/oraInventory
|
|
|
|
# on the 1st VM, unzip grid infrastructure distribution ZIP file
|
|
cd /app/grid/product/21.3
|
|
unzip /mnt/yavin4/kit/Oracle/Oracle_Database_21/LINUX.X64_213000_grid_home.zip
|
|
|
|
|
|
# from a X11 terminal, proceed with software installation
|
|
/app/grid/product/21.3/gridSetup.sh
|
|
|
|
# same command to use after software installation in order to configure the new Oracle Cluster
|
|
/app/grid/product/21.3/gridSetup.sh
|
|
|
|
# if grid setup fails with the error PRVG-11250 The Check "RPM Package Manager Database" Was Not Performed
|
|
# consider apply following MOS note: Cluvfy Fail with PRVG-11250 The Check "RPM Package Manager Database" Was Not Performed (Doc ID 2548970.1)
|
|
/app/grid/product/21.3/runcluvfy.sh stage -pre crsinst -n ylesia-db01,ylesia-db02 -method root
|
|
|
|
# from an X11 terminal, run the ASM configuration assistant in order to create the RECO diskgroup
|
|
/app/grid/product/21.3/bin/asmca
|
|
|
|
# check cluster status
|
|
crsctl status res -t
|
|
|
|
|
|
# Apply the latest GIRU patch using out-of-place method
|
|
#######################################################
|
|
|
|
# as root, create a staging area for patches on the first VM
|
|
mkdir -p /app/staging_area
|
|
chown -R grid:oinstall /app/staging_area
|
|
chmod g+w /app/staging_area
|
|
|
|
# as grid user, unzip the GI patch in the staging area on the first VM
|
|
su - grid
|
|
cd /app/staging_area
|
|
unzip /mnt/yavin4/kit/Oracle/Oracle_Database_21/patch/GI_RU_AVR23/p35132566_210000_Linux-x86-64.zip
|
|
|
|
# as root, on both VM, prepare the directory for the new GI
|
|
export NEW_GRID_HOME=/app/grid/software/21.10
|
|
|
|
mkdir -p $NEW_GRID_HOME
|
|
chown -R grid:oinstall $NEW_GRID_HOME
|
|
|
|
# as grid, only on the first VM, unzip the base distribution of the GI
|
|
su - grid
|
|
export NEW_GRID_HOME=/app/grid/software/21.10
|
|
cd $NEW_GRID_HOME
|
|
unzip /mnt/yavin4/kit/Oracle/Oracle_Database_21/LINUX.X64_213000_grid_home.zip
|
|
|
|
# very IMPORTANT
|
|
# deploy the last version of OPatch in the new GI home before proceed with the GI install with RU apply
|
|
# as grid user
|
|
cd $NEW_GRID_HOME
|
|
rm -rf OPatch
|
|
ls OPatch
|
|
unzip /mnt/yavin4/kit/Oracle/opatch/p6880880_210000_Linux-x86-64.zip
|
|
|
|
# at this moment, just simulate an install of the base GI, software only
|
|
# do not install, just put the response file aside
|
|
|
|
# setup the new GI HOME and install the GIRU
|
|
export NEW_GRID_HOME=/app/grid/software/21.10
|
|
export ORACLE_HOME=$NEW_GRID_HOME
|
|
$ORACLE_HOME/gridSetup.sh -executePrereqs -silent
|
|
|
|
cd $ORACLE_HOME
|
|
./gridSetup.sh -ignorePrereq -waitforcompletion -silent \
|
|
-applyRU /app/staging_area/35132566 \
|
|
-responseFile /home/grid/grid.rsp
|
|
|
|
|
|
# once new GI homes are installed and updated to the last GIRU
|
|
# switch CRS to the new GI HOME, on each VM's one by one (rolling mode)
|
|
|
|
export NEW_GRID_HOME=/app/grid/software/21.10
|
|
export ORACLE_HOME=$NEW_GRID_HOME
|
|
export CURRENT_NODE=$(hostname)
|
|
|
|
$ORACLE_HOME/gridSetup.sh \
|
|
-silent -switchGridHome \
|
|
oracle.install.option=CRS_SWONLY \
|
|
ORACLE_HOME=$ORACLE_HOME \
|
|
oracle.install.crs.config.clusterNodes=$CURRENT_NODE \
|
|
oracle.install.crs.rootconfig.executeRootScript=false
|
|
|
|
# check if grid:oinstall is the owner of the GI HOME, otherwise modify it:
|
|
chown grid /app/grid/software/21.10
|
|
|
|
# IMPORTANT: do not remove the old GI HOME before switching to the new GI HOME on all nodes
|
|
|
|
# update grid .bash_profile with the new GI home and check CRS
|
|
crsctl status res -t
|
|
|
|
# display registered ORACLE_HOME's
|
|
cat /app/grid/oraInventory/ContentsXML/inventory.xml | grep "HOME NAME"
|
|
|
|
# as grid user, on both VM, remove OLD ORACLE_HOME
|
|
export OLD_GRID_HOME=/app/grid/product/21.3
|
|
export ORACLE_HOME=$OLD_GRID_HOME
|
|
$ORACLE_HOME/deinstall/deinstall -local
|
|
|
|
# miscellaneous
|
|
########
|
|
|
|
# if some install/deinstall operations for 19 rdbms are failing checking OEL8.7 compatibility, use:
|
|
export CV_ASSUME_DISTID=OL7
|
|
|
|
# possible also to need following libs
|
|
dnf install libstdc++-devel.x86_64
|
|
dnf install libaio-devel.x86_64
|
|
dnf install libcap.x86_64 libcap-devel.x86_64
|
|
|
|
|
|
# potential issue with Oracle 19 RDBMS binary
|
|
# check permission (-rwsr-s--x) and owner (oracle:oinstall) for 19 oracle binary
|
|
ls -l /app/oracle/product/19/bin/oracle
|
|
# if is not good, issue as root
|
|
chown oracle:asmadmin /app/oracle/product/19/bin/oracle
|
|
chmod 6751 /app/oracle/product/19/bin/oracle
|
|
|
|
|
|
# if CLSRSC-762: Empty site GUID for the local site name (Doc ID 2878740.1)
|
|
# update $GRID_HOME/crs/install/crsgenconfig_params
|
|
# put the name of the RAC and generate a new UUID using linux uuid command
|
|
|
|
|
|
# Enabling a Read-Only Oracle Home
|
|
$ORACLE_HOME/bin/roohctl -enable
|
|
|
|
|
|
|
|
|