2026-03-12 21:01:38
This commit is contained in:
117
divers/ADB_free_install_01.txt
Normal file
117
divers/ADB_free_install_01.txt
Normal file
@@ -0,0 +1,117 @@
|
||||
-- https://github.com/oracle/adb-free/pkgs/container/adb-free
|
||||
|
||||
dd if=/dev/zero of=/vm/ssd0/ithor/app_02.img bs=1G count=8
|
||||
dd if=/dev/zero of=/vm/ssd0/ithor/app_03.img bs=1G count=8
|
||||
virsh domblklist ithor --details
|
||||
virsh attach-disk ithor /vm/ssd0/ithor/app_03.img vde --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
virsh attach-disk ithor /vm/ssd0/ithor/app_02.img vdf --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
|
||||
lsblk
|
||||
pvs
|
||||
pvcreate /dev/vde1
|
||||
pvcreate /dev/vdf1
|
||||
vgs
|
||||
vgextend vgapp /dev/vde1
|
||||
vgextend vgapp /dev/vdf1
|
||||
lvs
|
||||
lvextend -l +100%FREE /dev/vgapp/app
|
||||
xfs_growfs /app
|
||||
df -hT
|
||||
|
||||
# disable selinux
|
||||
/etc/selinux/config
|
||||
SELINUX=disabled
|
||||
|
||||
# install podman
|
||||
dnf install podman.x86_64
|
||||
|
||||
# change storage path for pods
|
||||
/etc/containers/storage.conf
|
||||
|
||||
# create a volume to use later for DATAPUMP / persistent storage across containers
|
||||
podman volume create adb_container_volume
|
||||
|
||||
# build pod
|
||||
podman run -d \
|
||||
-p 1521:1522 \
|
||||
-p 1522:1522 \
|
||||
-p 8443:8443 \
|
||||
-p 27017:27017 \
|
||||
-e DATABASE_NAME=ITHOR \
|
||||
-e WORKLOAD_TYPE=ATP \
|
||||
-e WALLET_PASSWORD=Remotecontrol1 \
|
||||
-e ADMIN_PASSWORD=Remotecontrol1 \
|
||||
--cap-add SYS_ADMIN \
|
||||
--device /dev/fuse \
|
||||
--name adb-free \
|
||||
--volume adb_container_volume:/u01/data \
|
||||
ghcr.io/oracle/adb-free:latest-23ai
|
||||
|
||||
# list pods and logs
|
||||
podman ps -a
|
||||
podman logs -f --names adb-free
|
||||
|
||||
# generate systemd unit to manage pod startup
|
||||
podman generate systemd --restart-policy=always -t 1 adb-free > /etc/systemd/system/adb-free.service
|
||||
systemctl list-unit-files | grep adb
|
||||
|
||||
systemctl enable adb-free.service
|
||||
systemctl stop adb-free.service
|
||||
systemctl start adb-free.service
|
||||
|
||||
# extract certificates from pod
|
||||
mkdir /app/adb-free
|
||||
podman cp adb-free:/u01/app/oracle/wallets/tls_wallet /app/adb-free/
|
||||
|
||||
# setup SQL*Plus connections from a linux machine
|
||||
# client 23 required
|
||||
# from umbara
|
||||
scp -rp ithor:/app/adb-free/tls_wallet adb-free_tls_wallet
|
||||
chown -R oracle:oinstall adb-free_tls_wallet
|
||||
|
||||
su - oracle
|
||||
export TNS_ADMIN=/app/oracle/adb-free_tls_wallet
|
||||
sed -i 's/localhost/ithor.swgalaxy/g' $TNS_ADMIN/tnsnames.ora
|
||||
|
||||
sqlplus admin/Remotecontrol1@ithor_low_tls
|
||||
sqlplus admin/Remotecontrol1@ithor_low
|
||||
|
||||
# create another ADMIN user
|
||||
-----------------------------------------------------------------
|
||||
-- USER SQL
|
||||
CREATE USER LIVESQL IDENTIFIED BY Remotecontrol1;
|
||||
|
||||
-- ADD ROLES
|
||||
GRANT CONNECT TO LIVESQL;
|
||||
GRANT CONSOLE_DEVELOPER TO LIVESQL;
|
||||
GRANT GRAPH_DEVELOPER TO LIVESQL;
|
||||
GRANT RESOURCE TO LIVESQL;
|
||||
ALTER USER LIVESQL DEFAULT ROLE CONSOLE_DEVELOPER,GRAPH_DEVELOPER;
|
||||
|
||||
-- REST ENABLE
|
||||
BEGIN
|
||||
ORDS_ADMIN.ENABLE_SCHEMA(
|
||||
p_enabled => TRUE,
|
||||
p_schema => 'LIVESQL',
|
||||
p_url_mapping_type => 'BASE_PATH',
|
||||
p_url_mapping_pattern => 'livesql',
|
||||
p_auto_rest_auth=> TRUE
|
||||
);
|
||||
-- ENABLE DATA SHARING
|
||||
C##ADP$SERVICE.DBMS_SHARE.ENABLE_SCHEMA(
|
||||
SCHEMA_NAME => 'LIVESQL',
|
||||
ENABLED => TRUE
|
||||
);
|
||||
commit;
|
||||
END;
|
||||
/
|
||||
|
||||
-- ENABLE GRAPH
|
||||
ALTER USER LIVESQL GRANT CONNECT THROUGH GRAPH$PROXY_USER;
|
||||
|
||||
-- QUOTA
|
||||
ALTER USER LIVESQL QUOTA UNLIMITED ON DATA;
|
||||
-----------------------------------------------------------------
|
||||
-- extra
|
||||
GRANT PDB_DBA TO LIVESQL;
|
||||
|
||||
105
divers/FK_indexing_01.txt
Normal file
105
divers/FK_indexing_01.txt
Normal file
@@ -0,0 +1,105 @@
|
||||
drop table SUPPLIER purge;
|
||||
|
||||
create table SUPPLIER(
|
||||
id INTEGER generated always as identity
|
||||
,name varchar2(30) not null
|
||||
,primary key(id)
|
||||
)
|
||||
;
|
||||
|
||||
|
||||
insert /*+ APPEND */ into SUPPLIER(name)
|
||||
select
|
||||
dbms_random.string('x',30)
|
||||
from
|
||||
xmltable('1 to 100')
|
||||
;
|
||||
|
||||
commit;
|
||||
|
||||
|
||||
drop table PRODUCT purge;
|
||||
create table PRODUCT(
|
||||
id integer generated always as identity
|
||||
,supplier_id integer
|
||||
,product_name varchar2(30)
|
||||
,price NUMBER
|
||||
,primary key(id)
|
||||
,constraint fk_prod_suppl foreign key(supplier_id) references SUPPLIER(id) on delete cascade
|
||||
)
|
||||
;
|
||||
|
||||
alter table PRODUCT drop constraint fk_prod_suppl;
|
||||
alter table PRODUCT add constraint fk_prod_suppl foreign key(supplier_id) references SUPPLIER(id) on delete cascade;
|
||||
|
||||
insert /*+ APPEND */ into PRODUCT(supplier_id,product_name,price)
|
||||
select
|
||||
trunc(dbms_random.value(1,90))
|
||||
,dbms_random.string('x',30)
|
||||
,dbms_random.value(1,10000)
|
||||
from
|
||||
xmltable('1 to 10000000')
|
||||
;
|
||||
|
||||
commit;
|
||||
|
||||
|
||||
-- grant execute on dbms_job to POC;
|
||||
-- grant create job to POC;
|
||||
|
||||
-- Delete one SUPPLIER row by primary key and commit. The child table
-- PRODUCT declares fk_prod_suppl ... ON DELETE CASCADE (see the DDL above),
-- so the matching PRODUCT rows are removed by the same delete.
create or replace procedure delete_supplier(suppl_id integer) as
|
||||
begin
|
||||
-- Tag the session (MODULE/ACTION) so these deletes are identifiable
-- in v$session / ASH while the demo lock storm runs.
DBMS_APPLICATION_INFO.set_module(module_name => 'delete_supplier', action_name => 'Delete supplier');
|
||||
delete from SUPPLIER where id=suppl_id;
|
||||
commit;
|
||||
end;
|
||||
/
|
||||
|
||||
|
||||
-- Fire off 50 concurrent deletes: one DBMS_JOB per supplier id 51..100,
-- each running delete_supplier(<id>) starting immediately (sysdate).
-- With the FK column PRODUCT.supplier_id unindexed this deliberately
-- provokes the TM-enqueue [mode=5] contention shown in the ashtop
-- output further down in this file.
create or replace procedure parallel_delete_supplier as
|
||||
-- OUT job number filled in by dbms_job.submit; the value is not used.
v_jobno number:=0;
|
||||
begin
|
||||
for i in 51..100 loop
|
||||
dbms_job.submit(v_jobno,'delete_supplier('||to_char(i)||');', sysdate);
|
||||
end loop;
|
||||
||||
-- DBMS_JOB submissions only become runnable once the submitting
-- transaction commits, so all 50 jobs start together here.
commit;
|
||||
||||
end;
|
||||
/
|
||||
|
||||
-- create a huge locking situation ;)
|
||||
exec parallel_delete_supplier;
|
||||
|
||||
|
||||
SQL> @ash/ashtop inst_id,session_id,sql_id,event2,blocking_inst_id,blocking_session,blocking_session_status,P1text,p2,p3 "username='POC'" sysdate-1/24/20 sysdate
|
||||
|
||||
Total Distinct Distinct
|
||||
Seconds AAS %This INST_ID SESSION_ID SQL_ID EVENT2 BLOCKING_INST_ID BLOCKING_SESSION BLOCKING_SE P1TEXT P2 P3 FIRST_SEEN LAST_SEEN Execs Seen Tstamps
|
||||
--------- ------- ------- ---------- ---------- ------------- ------------------------------------------ ---------------- ---------------- ----------- ------------------------------ ---------- ---------- ------------------- ------------------- ---------- --------
|
||||
15 .1 2% | 1 19 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 20 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 21 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 23 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 25 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 27 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 29 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 30 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 31 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 33 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 35 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 38 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 158 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 159 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
15 .1 2% | 1 160 2b4hjy6xfb76s enq: TM - contention [mode=5] 1 450 VALID name|mode 42238 0 2024-02-11 19:09:40 2024-02-11 19:09:54 1 15
|
||||
|
||||
|
||||
-- find enq mode from P1 column of gv$session
|
||||
SQL> select distinct' [mode='||BITAND(p1, POWER(2,14)-1)||']' from gv$session where username='POC' and event like 'enq%';
|
||||
|
||||
'[MODE='||BITAND(P1,POWER(2,14)-1)||']'
|
||||
------------------------------------------------
|
||||
[mode=5]
|
||||
|
||||
|
||||
-- index the FK on child table
|
||||
create index IDX_PRODUCT_SUPPL_ID on PRODUCT(supplier_id);
|
||||
|
||||
11
divers/KVM_VM_create_Windows_11.txt
Normal file
11
divers/KVM_VM_create_Windows_11.txt
Normal file
@@ -0,0 +1,11 @@
|
||||
qemu-img create -f raw /vm/ssd0/utapau/hdd_01.img 200G
|
||||
|
||||
virt-install \
|
||||
--graphics vnc,password=secret,listen=0.0.0.0 \
|
||||
--name=utapau \
|
||||
--vcpus=2 \
|
||||
--memory=4096 \
|
||||
--network bridge=br0 \
|
||||
--cdrom=/vm/hdd0/_kit_/Win10_1809Oct_v2_French_x64.iso \
|
||||
--disk=/vm/ssd0/utapau/hdd_01.img \
|
||||
--os-variant=win10
|
||||
13
divers/KVM_VM_create_linux.txt
Normal file
13
divers/KVM_VM_create_linux.txt
Normal file
@@ -0,0 +1,13 @@
|
||||
qemu-img create -f raw /vm/ssd0/topawa/hdd_01.img 200G
|
||||
|
||||
virt-install \
|
||||
--graphics vnc,password=secret,listen=0.0.0.0 \
|
||||
--name=topawa \
|
||||
--vcpus=4 \
|
||||
--memory=8192 \
|
||||
--network bridge=br0 \
|
||||
--network bridge=br0 \
|
||||
--cdrom=/vm/hdd0/_kit_/extix-23.4-64bit-deepin-23-refracta-3050mb-230403.iso \
|
||||
--disk=/vm/ssd0/topawa/hdd_01.img \
|
||||
--os-variant=ubuntu22.04
|
||||
|
||||
95
divers/KVM_install_Rocky9_01.txt
Normal file
95
divers/KVM_install_Rocky9_01.txt
Normal file
@@ -0,0 +1,95 @@
|
||||
-- Network setup
|
||||
----------------
|
||||
|
||||
nmcli connection show --active
|
||||
|
||||
nmcli connection modify enp4s0 ipv4.address 192.168.0.4/24
|
||||
nmcli connection modify enp4s0 ipv4.method manual ipv6.method ignore
|
||||
nmcli connection modify enp4s0 ipv4.gateway 192.168.0.1
|
||||
nmcli connection modify enp4s0 ipv4.dns 192.168.0.8
|
||||
nmcli connection modify enp4s0 ipv4.dns-search swgalaxy
|
||||
|
||||
hostnamectl set-hostname naboo.swgalaxy
|
||||
|
||||
# SELINUX=disabled
|
||||
/etc/selinux/config
|
||||
|
||||
systemctl stop firewalld
|
||||
systemctl disable firewalld
|
||||
|
||||
-- KVM install
|
||||
--------------
|
||||
|
||||
dnf install -y qemu-kvm libvirt virt-manager virt-install virtio-win.noarch
|
||||
dnf install -y epel-release -y
|
||||
dnf -y install bridge-utils virt-top libguestfs-tools bridge-utils virt-viewer
|
||||
dnf -y install at wget bind-utils
|
||||
|
||||
systemctl start atd
|
||||
systemctl enable atd
|
||||
systemctl status atd
|
||||
|
||||
lsmod | grep kvm
|
||||
|
||||
systemctl start libvirtd
|
||||
systemctl enable libvirtd
|
||||
|
||||
brctl show
|
||||
nmcli connection show
|
||||
|
||||
# This section should be scripted and run from the server console or run under at-script as background command
|
||||
#---->
|
||||
|
||||
export BR_NAME="br0"
|
||||
export BR_INT="enp4s0"
|
||||
export SUBNET_IP="192.168.0.4/24"
|
||||
export GW="192.168.0.1"
|
||||
export DNS1="192.168.0.8"
|
||||
|
||||
nmcli connection add type bridge autoconnect yes con-name ${BR_NAME} ifname ${BR_NAME}
|
||||
|
||||
nmcli connection modify ${BR_NAME} ipv4.addresses ${SUBNET_IP} ipv4.method manual
|
||||
nmcli connection modify ${BR_NAME} ipv4.gateway ${GW}
|
||||
nmcli connection modify ${BR_NAME} ipv4.dns ${DNS1}
|
||||
|
||||
nmcli connection delete ${BR_INT}
|
||||
nmcli connection add type bridge-slave autoconnect yes con-name ${BR_INT} ifname ${BR_INT} master ${BR_NAME}
|
||||
|
||||
nmcli connection show
|
||||
nmcli connection up br0
|
||||
nmcli connection show br0
|
||||
|
||||
ip addr show
|
||||
|
||||
systemctl restart libvirtd
|
||||
# <-----
|
||||
|
||||
|
||||
# Install other stuff: Cockpit, bind-utils, cifs-utils etc.
|
||||
dnf install cockpit cockpit-machines.noarch -y
|
||||
|
||||
systemctl start cockpit
|
||||
systemctl enable --now cockpit.socket
|
||||
|
||||
# reboot the system
|
||||
|
||||
dnf install -y lsof bind-utils cifs-utils.x86_64
|
||||
|
||||
# setup CIFS mounts
|
||||
groupadd smbuser --gid 1502
|
||||
useradd smbuser --uid 1502 -g smbuser -G smbuser
|
||||
|
||||
-- create credentials file for automount: /root/.smbcred
|
||||
username=vplesnila
|
||||
password=*****
|
||||
|
||||
mkdir -p /mnt/yavin4
|
||||
mkdir -p /mnt/unprotected
|
||||
|
||||
-- add in /etc/fstab
|
||||
//192.168.0.9/share /mnt/yavin4 cifs vers=3.0,uid=smbuser,gid=smbuser,file_mode=0775,dir_mode=0775,credentials=/root/.smbcred,mfsymlinks,iocharset=utf8 0 0
|
||||
//192.168.0.9/unprotected /mnt/unprotected cifs vers=3.0,uid=smbuser,gid=smbuser,file_mode=0775,dir_mode=0775,credentials=/root/.smbcred,mfsymlinks,iocharset=utf8 0 0
|
||||
|
||||
systemctl daemon-reload
|
||||
mount -a
|
||||
|
||||
2
divers/KVM_save_all_domain_XML.txt
Normal file
2
divers/KVM_save_all_domain_XML.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
virsh list --all --name | awk {'print "virsh dumpxml " $1 " > " $1".xml"'} | grep -v "virsh dumpxml > .xml"
|
||||
|
||||
144
divers/OEL9_install_01.txt
Normal file
144
divers/OEL9_install_01.txt
Normal file
@@ -0,0 +1,144 @@
|
||||
dd if=/dev/zero of=system_01.img bs=1G count=10
|
||||
dd if=/dev/zero of=swap_01.img bs=1G count=4
|
||||
|
||||
# create new domain
|
||||
virt-install \
|
||||
--graphics vnc,password=secret,listen=0.0.0.0 \
|
||||
--name=seedmachine \
|
||||
--vcpus=4 \
|
||||
--memory=8192 \
|
||||
--network bridge=br0 \
|
||||
--network bridge=br0 \
|
||||
--cdrom=/mnt/yavin4/kit/Linux/OracleLinux-R9-U4-x86_64-boot-uek.iso \
|
||||
--disk /vm/ssd0/seedmachine/system_01.img \
|
||||
--disk /vm/ssd0/seedmachine/swap_01.img \
|
||||
--os-variant=ol9.3
|
||||
|
||||
dnf install -y lsof bind-utils cifs-utils.x86_64
|
||||
dnf -y install at wget bind-utils tar.x86_64
|
||||
|
||||
systemctl start atd
|
||||
systemctl enable atd
|
||||
systemctl status atd
|
||||
|
||||
-- Network setup
|
||||
----------------
|
||||
|
||||
nmcli connection show --active
|
||||
|
||||
nmcli connection modify enp1s0 ipv4.address 192.168.0.66/24
|
||||
nmcli connection modify enp1s0 ipv4.method manual ipv6.method ignore
|
||||
nmcli connection modify enp1s0 ipv4.gateway 192.168.0.1
|
||||
nmcli connection modify enp1s0 ipv4.dns 192.168.0.8
|
||||
nmcli connection modify enp1s0 ipv4.dns-search swgalaxy
|
||||
|
||||
nmcli connection modify enp2s0 ipv4.address 192.168.1.66/24
|
||||
nmcli connection modify enp2s0 ipv4.method manual ipv6.method ignore
|
||||
|
||||
hostnamectl set-hostname seedmachine.swgalaxy
|
||||
|
||||
# SELINUX=disabled
|
||||
/etc/selinux/config
|
||||
|
||||
systemctl stop firewalld
|
||||
systemctl disable firewalld
|
||||
|
||||
dnf install oracle-epel-release-el9.x86_64 oracle-database-preinstall-19c.x86_64
|
||||
dnf install -y rlwrap.x86_64
|
||||
|
||||
|
||||
# setup CIFS mounts
|
||||
groupadd smbuser --gid 1502
|
||||
useradd smbuser --uid 1502 -g smbuser -G smbuser
|
||||
|
||||
-- create credentials file for automount: /root/.smbcred
|
||||
username=vplesnila
|
||||
password=*****
|
||||
|
||||
mkdir -p /mnt/yavin4
|
||||
mkdir -p /mnt/unprotected
|
||||
|
||||
-- add in /etc/fstab
|
||||
//192.168.0.9/share /mnt/yavin4 cifs vers=3.0,uid=smbuser,gid=smbuser,file_mode=0775,dir_mode=0775,credentials=/root/.smbcred,mfsymlinks,iocharset=utf8 0 0
|
||||
//192.168.0.9/unprotected /mnt/unprotected cifs vers=3.0,uid=smbuser,gid=smbuser,file_mode=0775,dir_mode=0775,credentials=/root/.smbcred,mfsymlinks,iocharset=utf8 0 0
|
||||
|
||||
systemctl daemon-reload
|
||||
mount -a
|
||||
|
||||
# add oracle user in smbuser group
|
||||
cat /etc/group | grep smbuser
|
||||
|
||||
smbuser:x:1502:smbuser,oracle
|
||||
|
||||
# add /app FS
|
||||
dd if=/dev/zero of=app_01.img bs=1G count=40
|
||||
dd if=/dev/zero of=data_01.img bs=1G count=20
|
||||
dd if=/dev/zero of=data_02.img bs=1G count=20
|
||||
dd if=/dev/zero of=reco_01.img bs=1G count=20
|
||||
|
||||
virsh domblklist seedmachine --details
|
||||
virsh attach-disk seedmachine /vm/ssd0/seedmachine/app_01.img vdc --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
virsh attach-disk seedmachine /vm/ssd0/seedmachine/data_01.img vdd --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
virsh attach-disk seedmachine /vm/ssd0/seedmachine/data_02.img vde --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
virsh attach-disk seedmachine /vm/ssd0/seedmachine/reco_01.img vdf --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
|
||||
fdisk /dev/vdc
|
||||
fdisk /dev/vdd
|
||||
fdisk /dev/vde
|
||||
fdisk /dev/vdf
|
||||
|
||||
pvs
|
||||
pvcreate /dev/vdc1
|
||||
pvcreate /dev/vdd1
|
||||
pvcreate /dev/vde1
|
||||
pvcreate /dev/vdf1
|
||||
|
||||
vgs
|
||||
vgcreate vgapp /dev/vdc1
|
||||
vgcreate vgdata /dev/vdd1 /dev/vde1
|
||||
vgcreate vgreco /dev/vdf1
|
||||
|
||||
lvs
|
||||
lvcreate -n app -l 100%FREE vgapp
|
||||
lvcreate -n data -l 100%FREE vgdata
|
||||
lvcreate -n reco -l 100%FREE vgreco
|
||||
|
||||
mkfs.xfs /dev/mapper/vgapp-app
|
||||
mkfs.xfs /dev/mapper/vgdata-data
|
||||
mkfs.xfs /dev/mapper/vgreco-reco
|
||||
|
||||
mkdir -p /app /data /reco
|
||||
|
||||
# add in /etc/fstab
|
||||
/dev/mapper/vgapp-app /app xfs defaults 0 0
|
||||
/dev/mapper/vgdata-data /data xfs defaults 0 0
|
||||
/dev/mapper/vgreco-reco /reco xfs defaults 0 0
|
||||
|
||||
systemctl daemon-reload
|
||||
mount -a
|
||||
|
||||
chown -R oracle:oinstall /app /data /reco
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
2
divers/PC_boot_menu.txt
Normal file
2
divers/PC_boot_menu.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
AMD Ryzen - F7
|
||||
|
||||
9
divers/PDB$SEED_recompile_all.sql
Normal file
9
divers/PDB$SEED_recompile_all.sql
Normal file
@@ -0,0 +1,9 @@
|
||||
alter pluggable database PDB$SEED close immediate instances=ALL;
|
||||
alter pluggable database PDB$SEED open read write instances=ALL;
|
||||
alter session set container=PDB$SEED;
|
||||
alter session set "_ORACLE_SCRIPT"=true;
|
||||
@?/rdbms/admin/utlrp
|
||||
alter session set "_ORACLE_SCRIPT"=false;
|
||||
alter session set container=CDB$ROOT;
|
||||
alter pluggable database PDB$SEED close immediate instances=ALL;
|
||||
alter pluggable database PDB$SEED open read only instances=ALL;
|
||||
157
divers/PDB_PITR_scratch_01.txt
Normal file
157
divers/PDB_PITR_scratch_01.txt
Normal file
@@ -0,0 +1,157 @@
|
||||
rman target /
|
||||
|
||||
run
|
||||
{
|
||||
set nocfau;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tech/oracle/work/dataguard_ADNA/backup/ADNAPRD/backupset/%d_%U_%s_%t.bck';
|
||||
allocate channel ch02 device type disk format '/mnt/yavin4/tech/oracle/work/dataguard_ADNA/backup/ADNAPRD/backupset/%d_%U_%s_%t.bck';
|
||||
allocate channel ch03 device type disk format '/mnt/yavin4/tech/oracle/work/dataguard_ADNA/backup/ADNAPRD/backupset/%d_%U_%s_%t.bck';
|
||||
allocate channel ch04 device type disk format '/mnt/yavin4/tech/oracle/work/dataguard_ADNA/backup/ADNAPRD/backupset/%d_%U_%s_%t.bck';
|
||||
backup as compressed backupset incremental level 0 database section size 2G include current controlfile plus archivelog delete input;
|
||||
release channel ch01;
|
||||
release channel ch02;
|
||||
release channel ch03;
|
||||
release channel ch04;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tech/oracle/work/dataguard_ADNA/backup/ADNAPRD/backupset/%d_%U_%s_%t.controlfile';
|
||||
backup current controlfile;
|
||||
release channel ch01;
|
||||
}
|
||||
|
||||
|
||||
sqlplus 'sys/"Secret00!"'@wayland.swgalaxy:1555/ADNAPRD_DGMGRL as sysdba
|
||||
sqlplus 'sys/"Secret00!"'@togoria.swgalaxy:1555/ADNADRP_DGMGRL as sysdba
|
||||
|
||||
|
||||
configure archivelog deletion policy to applied on all standby;
|
||||
|
||||
rman target='sys/"Secret00!"'@wayland.swgalaxy:1555/ADNAPRD_DGMGRL auxiliary='sys/"Secret00!"'@togoria.swgalaxy:1555/ADNADRP_DGMGRL
|
||||
|
||||
run
|
||||
{
|
||||
allocate channel pri01 device type disk;
|
||||
allocate channel pri02 device type disk;
|
||||
allocate channel pri03 device type disk;
|
||||
allocate channel pri04 device type disk;
|
||||
allocate channel pri05 device type disk;
|
||||
allocate channel pri06 device type disk;
|
||||
allocate channel pri07 device type disk;
|
||||
allocate channel pri08 device type disk;
|
||||
allocate channel pri09 device type disk;
|
||||
allocate channel pri10 device type disk;
|
||||
|
||||
allocate auxiliary channel aux01 device type disk;
|
||||
allocate auxiliary channel aux02 device type disk;
|
||||
allocate auxiliary channel aux03 device type disk;
|
||||
allocate auxiliary channel aux04 device type disk;
|
||||
allocate auxiliary channel aux05 device type disk;
|
||||
allocate auxiliary channel aux06 device type disk;
|
||||
allocate auxiliary channel aux07 device type disk;
|
||||
allocate auxiliary channel aux08 device type disk;
|
||||
allocate auxiliary channel aux09 device type disk;
|
||||
allocate auxiliary channel aux10 device type disk;
|
||||
|
||||
duplicate database 'ADNA' for standby
|
||||
from active database using compressed backupset section size 512M;
|
||||
}
|
||||
|
||||
|
||||
|
||||
alter system set dg_broker_config_file1='/app/oracle/base/admin/ADNAPRD/dgmgrl/dr1ADNAPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='/app/oracle/base/admin/ADNAPRD/dgmgrl/dr2ADNAPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
|
||||
alter system set dg_broker_config_file1='/app/oracle/base/admin/ADNADRP/dgmgrl/dr1ADNADRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='/app/oracle/base/admin/ADNADRP/dgmgrl/dr2ADNADRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
|
||||
rlwrap dgmgrl 'sys/"Secret00!"'@wayland.swgalaxy:1555/ADNAPRD_DGMGRL
|
||||
|
||||
create configuration ADNA as
|
||||
primary database is ADNAPRD
|
||||
connect identifier is 'wayland.swgalaxy:1555/ADNAPRD_DGMGRL';
|
||||
|
||||
add database ADNADRP
|
||||
as connect identifier is 'togoria.swgalaxy:1555/ADNADRP_DGMGRL'
|
||||
maintained as physical;
|
||||
|
||||
enable configuration;
|
||||
|
||||
edit database 'adnaprd' set property ArchiveLagTarget=0;
|
||||
edit database 'adnaprd' set property LogArchiveMaxProcesses=2;
|
||||
edit database 'adnaprd' set property LogArchiveMinSucceedDest=1;
|
||||
edit database 'adnaprd' set property StandbyFileManagement='AUTO';
|
||||
|
||||
edit database 'adnadrp' set property ArchiveLagTarget=0;
|
||||
edit database 'adnadrp' set property LogArchiveMaxProcesses=2;
|
||||
edit database 'adnadrp' set property LogArchiveMinSucceedDest=1;
|
||||
edit database 'adnadrp' set property StandbyFileManagement='AUTO';
|
||||
|
||||
edit instance 'ADNAPRD' set property 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=wayland.swgalaxy)(PORT=1555))(CONNECT_DATA=(SERVICE_NAME=ADNAPRD_DGMGRL)(INSTANCE_NAME=ADNAPRD)(SERVER=DEDICATED)))';
|
||||
edit instance 'ADNADRP' set property 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=togoria.swgalaxy)(PORT=1555))(CONNECT_DATA=(SERVICE_NAME=ADNADRP_DGMGRL)(INSTANCE_NAME=ADNADRP)(SERVER=DEDICATED)))';
|
||||
|
||||
show configuration
|
||||
validate database 'adnadrp'
|
||||
validate database 'adnaprd'
|
||||
|
||||
|
||||
|
||||
|
||||
create table heartbeat (ts TIMESTAMP);
|
||||
insert into heartbeat values (CURRENT_TIMESTAMP);
|
||||
commit;
|
||||
|
||||
|
||||
-- Stamp the HEARTBEAT table with the current system timestamp and commit.
-- Intentionally no WHERE clause — it updates every row; the setup above
-- inserts exactly one row, so each call touches that single row.
-- Driven once per second by the HEARTBEAT_JOB scheduler job (created
-- below) to generate steady redo for the PDB PITR exercise.
CREATE OR REPLACE PROCEDURE update_heartbeat AS
|
||||
BEGIN
|
||||
UPDATE heartbeat
|
||||
SET ts = SYSTIMESTAMP;
|
||||
COMMIT;
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
BEGIN
|
||||
DBMS_SCHEDULER.CREATE_JOB (
|
||||
job_name => 'HEARTBEAT_JOB',
|
||||
job_type => 'STORED_PROCEDURE',
|
||||
job_action => 'UPDATE_HEARTBEAT',
|
||||
start_date => SYSTIMESTAMP,
|
||||
repeat_interval => 'FREQ=SECONDLY; INTERVAL=1',
|
||||
enabled => FALSE
|
||||
);
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
BEGIN
|
||||
DBMS_SCHEDULER.ENABLE('HEARTBEAT_JOB');
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
BEGIN
|
||||
DBMS_SCHEDULER.DISABLE('HEARTBEAT_JOB');
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
|
||||
BEGIN
|
||||
DBMS_SCHEDULER.DROP_JOB('HEARTBEAT_JOB');
|
||||
END;
|
||||
/
|
||||
|
||||
drop PROCEDURE update_heartbeat;
|
||||
|
||||
drop table heartbeat purge;
|
||||
|
||||
|
||||
run{
|
||||
set until time "TIMESTAMP'2026-02-21 15:50:00'";
|
||||
alter pluggable database RYLS close immediate instances=all;
|
||||
restore pluggable database RYLS;
|
||||
recover pluggable database RYLS;
|
||||
alter pluggable database RYLS open resetlogs instances=all;
|
||||
}
|
||||
20
divers/Purines_vs_Omega‑3.md
Normal file
20
divers/Purines_vs_Omega‑3.md
Normal file
@@ -0,0 +1,20 @@
|
||||
# Classement croisé Purines vs Oméga‑3
|
||||
|
||||
| Aliment | Purines (mg/100 g) | Catégorie purines | Oméga‑3 (g/100 g) | Catégorie oméga‑3 | Verdict croisé |
|
||||
|--------------------------|--------------------|-------------------|-------------------|-------------------|----------------|
|
||||
| Poulet (blanc/cuisse) | 150–175 | Modéré | ~0.05 | Pauvre | ⚠️ Peu d’intérêt nutritionnel, purines modérées mais quasi pas d’oméga‑3 |
|
||||
| Bœuf (muscle) | ~120 | Modéré | ~0.04 | Pauvre | ⚠️ Idem, faible en oméga‑3 |
|
||||
| Foie de bœuf | ~300 | Très élevé | ~0.10 | Pauvre | 🚫 À éviter (purines très élevées, peu d’oméga‑3) |
|
||||
| Sardine | ~210 | Élevé | ~0.80–0.90 | Moyen | ⚖️ Bon apport en oméga‑3 mais purines élevées |
|
||||
| Anchois | ~300 | Très élevé | ~0.90 | Moyen | 🚫 Risque goutte, malgré oméga‑3 |
|
||||
| Saumon | ~170 | Modéré | ~2.3–2.6 | Riche | ✅ Excellent compromis (oméga‑3 riches, purines modérées) |
|
||||
| Maquereau | ~145 | Modéré | ~1.4–1.8 | Riche | ✅ Très bon compromis |
|
||||
| Hareng | ~170 | Modéré | ~1.6–2.2 | Riche | ✅ Très bon compromis |
|
||||
| Truite | ~150 | Modéré | ~1.2–1.3 | Riche | ✅ Bon compromis |
|
||||
| Thon (rouge) | ~150 | Modéré | ~1.6–1.7 | Riche | ✅ Bon compromis |
|
||||
| Crevettes | ~150 | Modéré | ~0.30 | Moyen | ⚖️ Correct mais pas exceptionnel |
|
||||
| Crabe / Tourteau | ~150 | Modéré | ~0.45 | Moyen | ⚖️ Correct |
|
||||
| Homard / Langouste | ~135 | Modéré | ~0.52 | Moyen | ⚖️ Correct |
|
||||
| Moules | ~150 | Modéré | ~0.59–0.85 | Moyen | ⚖️ Correct |
|
||||
| Couteaux de mer | ~150 | Modéré | ~0.6 | Moyen | ⚖️ Correct |
|
||||
| Coquilles Saint‑Jacques | ~150–180 | Modéré | ~0.5–0.6 | Moyen | ⚖️ Correct |
|
||||
256
divers/RAC_19_OEL9_ASMLIB3_setup_01.txt
Normal file
256
divers/RAC_19_OEL9_ASMLIB3_setup_01.txt
Normal file
@@ -0,0 +1,256 @@
|
||||
# network setup on each node
|
||||
nmcli connection show --active
|
||||
|
||||
nmcli connection modify enp1s0 ipv4.address 192.168.0.95/24
|
||||
nmcli connection modify enp1s0 ipv4.method manual ipv6.method ignore
|
||||
nmcli connection modify enp1s0 ipv4.gateway 192.168.0.1
|
||||
nmcli connection modify enp1s0 ipv4.dns 192.168.0.8
|
||||
nmcli connection modify enp1s0 ipv4.dns-search swgalaxy
|
||||
|
||||
nmcli connection modify enp2s0 ipv4.address 192.168.1.95/24
|
||||
nmcli connection modify enp2s0 ipv4.method manual ipv6.method ignore
|
||||
|
||||
nmcli connection modify enp10s0 ipv4.address 192.168.2.95/24
|
||||
nmcli connection modify enp10s0 ipv4.method manual ipv6.method ignore
|
||||
|
||||
hostnamectl set-hostname rodia-db03.swgalaxy
|
||||
|
||||
# attach disks in each node
|
||||
virsh attach-disk ylesia-db03 /vm/ssd0/ylesia-rac/disk_array/asm_01.img vdd --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
virsh attach-disk ylesia-db03 /vm/ssd0/ylesia-rac/disk_array/asm_02.img vde --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
virsh attach-disk ylesia-db03 /vm/ssd0/ylesia-rac/disk_array/asm_03.img vdf --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
virsh attach-disk ylesia-db03 /vm/ssd0/ylesia-rac/disk_array/asm_04.img vdg --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
virsh attach-disk ylesia-db03 /vm/ssd0/ylesia-rac/disk_array/asm_05.img vdh --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
|
||||
|
||||
- unzip distrib in grid home
|
||||
- unzip last GIRU in a temporary location
|
||||
- apply GIRU in silent mode over the base GI distrib
|
||||
|
||||
# on each node
|
||||
##############
|
||||
|
||||
mkdir -p /app/oracle
|
||||
chmod 775 /app/oracle
|
||||
chown -R oracle:oinstall /app/oracle
|
||||
|
||||
cd /app/oracle/
|
||||
mkdir -p admin base grid oraInventory rdbms staging_area
|
||||
chmod 775 admin base grid oraInventory rdbms staging_area
|
||||
|
||||
chown -R oracle:oinstall admin rdbms staging_area
|
||||
chown -R grid:oinstall grid oraInventory base
|
||||
|
||||
su - grid
|
||||
mkdir -p /app/oracle/grid/product/19
|
||||
|
||||
|
||||
# on first node
|
||||
###############
|
||||
|
||||
# unzip distrib
|
||||
cd /app/oracle/grid/product/19
|
||||
unzip /mnt/yavin4/kit/Oracle/Grid_Infra/19/distrib/LINUX.X64_193000_grid_home.zip
|
||||
|
||||
# update Opatch
|
||||
rm -rf OPatch
|
||||
unzip /mnt/yavin4/kit/Oracle/opatch/p6880880_190000_Linux-x86-64.zip
|
||||
|
||||
cd /app/oracle/staging_area/
|
||||
unzip /mnt/yavin4/kit/Oracle/Grid_Infra/19/GIRU/GIRU_19.27/p37641958_190000_Linux-x86-64.zip
|
||||
|
||||
# apply the RU on this ORACLE_HOME
|
||||
# on first node as grid
|
||||
|
||||
export ORACLE_BASE=/app/oracle/base
|
||||
export ORACLE_HOME=/app/oracle/grid/product/19
|
||||
export PATH=$ORACLE_HOME/bin:$PATH
|
||||
|
||||
$ORACLE_HOME/gridSetup.sh -silent -applyRU /app/oracle/staging_area/37641958/36758186
|
||||
$ORACLE_HOME/gridSetup.sh -silent -applyRU /app/oracle/staging_area/37641958/37642901
|
||||
$ORACLE_HOME/gridSetup.sh -silent -applyRU /app/oracle/staging_area/37641958/37643161
|
||||
$ORACLE_HOME/gridSetup.sh -silent -applyRU /app/oracle/staging_area/37641958/37654975
|
||||
$ORACLE_HOME/gridSetup.sh -silent -applyRU /app/oracle/staging_area/37641958/37762426
|
||||
|
||||
# to satisfy ALL pre-requisites, to do on ALL nodes
|
||||
|
||||
dnf install -y $ORACLE_HOME/cv/rpm/cvuqdisk-1.0.10-1.rpm
|
||||
|
||||
# in /etc/security/limits.conf
|
||||
|
||||
# Oracle
|
||||
oracle soft stack 10240
|
||||
grid soft stack 10240
|
||||
grid soft memlock 3145728
|
||||
grid hard memlock 3145728
|
||||
|
||||
# in /etc/sysctl.conf
|
||||
|
||||
# other oracle settings
|
||||
kernel.panic = 1
|
||||
|
||||
|
||||
# temporary SWAP
|
||||
dd if=/dev/zero of=/mnt/unprotected/tmp/oracle/swap_20g.img bs=1G count=20
|
||||
mkswap /mnt/unprotected/tmp/oracle/swap_20g.img
|
||||
swapon /mnt/unprotected/tmp/oracle/swap_20g.img
|
||||
free -h
|
||||
|
||||
##############
|
||||
|
||||
# pre-check as grid
|
||||
export ORACLE_BASE=/app/oracle/base
|
||||
export ORACLE_HOME=/app/oracle/grid/product/19
|
||||
export PATH=$ORACLE_HOME/bin:$PATH
|
||||
|
||||
$ORACLE_HOME/runcluvfy.sh stage -pre crsinst -n ylesia-db01,ylesia-db02,ylesia-db03
|
||||
|
||||
|
||||
# ASM disks
|
||||
lsblk --list | egrep "vdd|vde|vdf|vdg|vdh"
|
||||
ls -ltr /dev/vd[d-h]
|
||||
|
||||
fdisk ..... all disk
|
||||
|
||||
|
||||
lsblk --list | egrep "vdd|vde|vdf|vdg|vdh"
|
||||
ls -ltr /dev/vd[d-h]1
|
||||
|
||||
# install asmlib on all nodes
|
||||
dnf install -y oracleasm-support-3.1.0-10.el9.x86_64.rpm
|
||||
dnf install -y oracleasmlib-3.1.0-6.el9.x86_64.rpm
|
||||
|
||||
systemctl start oracleasm.service
|
||||
|
||||
oracleasm configure -i
|
||||
|
||||
(answers: grid, asmdba and all default)
|
||||
|
||||
echo "kernel.io_uring_disabled = 0" >> /etc/sysctl.conf
|
||||
sysctl -p
|
||||
|
||||
# create ASM disks on first node
|
||||
oracleasm createdisk DATA_01 /dev/vdd1
|
||||
oracleasm createdisk DATA_02 /dev/vde1
|
||||
oracleasm createdisk DATA_03 /dev/vdf1
|
||||
oracleasm createdisk RECO_01 /dev/vdg1
|
||||
oracleasm createdisk RECO_02 /dev/vdh1
|
||||
|
||||
oracleasm scandisks
|
||||
oracleasm listdisks
|
||||
|
||||
# on other nodes, only scan and list ASM disks
|
||||
|
||||
# on first node, grid setup
|
||||
$ORACLE_HOME/gridSetup.sh
|
||||
|
||||
# RDBMS install
|
||||
###############
|
||||
|
||||
# unzip distrib
|
||||
mkdir -p /app/oracle/rdbms/product/19
|
||||
cd /app/oracle/rdbms/product/19
|
||||
unzip /mnt/yavin4/kit/Oracle/Oracle_Database_19/distrib/LINUX.X64_193000_db_home.zip
|
||||
|
||||
|
||||
# update Opatch
|
||||
rm -rf OPatch
|
||||
unzip /mnt/yavin4/kit/Oracle/opatch/p6880880_190000_Linux-x86-64.zip
|
||||
|
||||
# apply the RU on this ORACLE_HOME
|
||||
# on first node as oracle
|
||||
|
||||
export ORACLE_BASE=/app/oracle/base
|
||||
export ORACLE_HOME=/app/oracle/rdbms/product/19
|
||||
export PATH=$ORACLE_HOME/bin:$PATH
|
||||
|
||||
$ORACLE_HOME/runInstaller -silent -applyRU /app/oracle/staging_area/37641958/36758186
|
||||
$ORACLE_HOME/runInstaller -silent -applyRU /app/oracle/staging_area/37641958/37642901
|
||||
$ORACLE_HOME/runInstaller -silent -applyRU /app/oracle/staging_area/37641958/37643161
|
||||
$ORACLE_HOME/runInstaller -silent -applyRU /app/oracle/staging_area/37641958/37654975
|
||||
$ORACLE_HOME/runInstaller -silent -applyRU /app/oracle/staging_area/37641958/37762426
|
||||
|
||||
# install from a X session
|
||||
$ORACLE_HOME/runInstaller
|
||||
|
||||
# on all nodes
|
||||
chmod -R 775 /app/oracle/base/admin /app/oracle/base/diag
|
||||
|
||||
cat <<EOF! >> /etc/oratab
|
||||
SET19:/app/oracle/rdbms/product/19:N
|
||||
EOF!
|
||||
|
||||
|
||||
# using DBCA to create/delete database
|
||||
|
||||
export ORACLE_DB_NAME=AERON
|
||||
export ORACLE_UNQNAME=AERONPRD
|
||||
export PDB_NAME=REEK
|
||||
export NODE1=ylesia-db01
|
||||
export NODE2=ylesia-db02
|
||||
export NODE3=ylesia-db03
|
||||
export SYS_PASSWORD="Secret00!"
|
||||
export PDB_PASSWORD="Secret00!"
|
||||
|
||||
# create MULTITENANT database
|
||||
dbca -silent -createDatabase \
|
||||
-templateName General_Purpose.dbc \
|
||||
-sid ${ORACLE_UNQNAME} \
|
||||
-gdbname ${ORACLE_UNQNAME} -responseFile NO_VALUE \
|
||||
-characterSet AL32UTF8 \
|
||||
-sysPassword ${SYS_PASSWORD} \
|
||||
-systemPassword ${SYS_PASSWORD} \
|
||||
-createAsContainerDatabase true \
|
||||
-numberOfPDBs 1 \
|
||||
-pdbName ${PDB_NAME} \
|
||||
-pdbAdminPassword ${PDB_PASSWORD} \
|
||||
-databaseType MULTIPURPOSE \
|
||||
-automaticMemoryManagement false \
|
||||
-totalMemory 3072 \
|
||||
-redoLogFileSize 128 \
|
||||
-emConfiguration NONE \
|
||||
-ignorePreReqs \
|
||||
-nodelist ${NODE1},${NODE2},${NODE3} \
|
||||
-storageType ASM \
|
||||
-diskGroupName +DATA \
|
||||
-recoveryGroupName +RECO \
|
||||
-useOMF true \
|
||||
-initparams db_name=${ORACLE_DB_NAME},db_unique_name=${ORACLE_UNQNAME},sga_max_size=3G,sga_target=3G,pga_aggregate_target=512M \
|
||||
-enableArchive true \
|
||||
-recoveryAreaDestination +RECO \
|
||||
-recoveryAreaSize 30720 \
|
||||
-asmsnmpPassword ${SYS_PASSWORD}
|
||||
|
||||
# create NON-CDB database
|
||||
dbca -silent -createDatabase \
|
||||
-templateName General_Purpose.dbc \
|
||||
-sid ${ORACLE_UNQNAME} \
|
||||
-gdbname ${ORACLE_UNQNAME} -responseFile NO_VALUE \
|
||||
-characterSet AL32UTF8 \
|
||||
-sysPassword ${SYS_PASSWORD} \
|
||||
-systemPassword ${SYS_PASSWORD} \
|
||||
-createAsContainerDatabase false \
|
||||
-databaseType MULTIPURPOSE \
|
||||
-automaticMemoryManagement false \
|
||||
-totalMemory 3072 \
|
||||
-redoLogFileSize 128 \
|
||||
-emConfiguration NONE \
|
||||
-ignorePreReqs \
|
||||
-nodelist ${NODE1},${NODE2},${NODE3} \
|
||||
-storageType ASM \
|
||||
-diskGroupName +DATA \
|
||||
-recoveryGroupName +RECO \
|
||||
-useOMF true \
|
||||
-initparams db_name=${ORACLE_DB_NAME},db_unique_name=${ORACLE_UNQNAME},sga_max_size=3G,sga_target=3G,pga_aggregate_target=512M \
|
||||
-enableArchive true \
|
||||
-recoveryAreaDestination +RECO \
|
||||
-recoveryAreaSize 30720 \
|
||||
-asmsnmpPassword ${SYS_PASSWORD}
|
||||
|
||||
|
||||
# delete database
|
||||
dbca -silent -deleteDatabase \
|
||||
-sourceDB AERONPRD \
|
||||
-sysPassword ${SYS_PASSWORD} \
|
||||
-forceArchiveLogDeletion
|
||||
|
||||
86
divers/SuSE_install_01.txt
Normal file
86
divers/SuSE_install_01.txt
Normal file
@@ -0,0 +1,86 @@
|
||||
#############
|
||||
# VM creation
|
||||
#############
|
||||
|
||||
mkdir /vm/ssd0/aquaris
|
||||
|
||||
qemu-img create -f raw /vm/ssd0/aquaris/root_01.img 64G
|
||||
|
||||
virt-install \
|
||||
--graphics vnc,password=secret,listen=0.0.0.0 \
|
||||
--name=aquaris \
|
||||
--vcpus=4 \
|
||||
--memory=4096 \
|
||||
--network bridge=br0 \
|
||||
--network bridge=br0 \
|
||||
--cdrom=/vm/hdd0/_kit_/openSUSE-Leap-15.5-NET-x86_64-Build491.1-Media.iso \
|
||||
--disk /vm/ssd0/aquaris/root_01.img \
|
||||
--os-variant=opensuse15.4
|
||||
|
||||
####################
|
||||
# SuSE configuration
|
||||
####################
|
||||
|
||||
# network interfaces
|
||||
/etc/sysconfig/network/ifcfg-eth0
|
||||
/etc/sysconfig/network/ifcfg-eth1
|
||||
|
||||
#DNS
|
||||
/run/netconfig/resolv.conf
|
||||
# set NETCONFIG_DNS_POLICY="auto" in /etc/sysconfig/network/config
|
||||
|
||||
# gateway
|
||||
/etc/sysconfig/network/routes
|
||||
|
||||
# delete unwanted static entries in /etc/hosts
|
||||
|
||||
##############
|
||||
# VM customize
|
||||
##############
|
||||
|
||||
qemu-img create -f raw /vm/ssd0/aquaris/app_01.img 60G
|
||||
dd if=/dev/zero of=/vm/ssd0/aquaris/data_01.img bs=1G count=30
|
||||
dd if=/dev/zero of=/vm/ssd0/aquaris/backup_01.img bs=1G count=20
|
||||
|
||||
virsh domblklist aquaris --details
|
||||
|
||||
virsh attach-disk aquaris /vm/ssd0/aquaris/app_01.img vdb --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
virsh attach-disk aquaris /vm/ssd0/aquaris/data_01.img vdc --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
virsh attach-disk aquaris /vm/ssd0/aquaris/backup_01.img vdd --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
|
||||
btrfs device scan
|
||||
btrfs filesystem show
|
||||
|
||||
mkfs.btrfs /dev/vdb
|
||||
mkfs.btrfs /dev/vdc
|
||||
mkfs.btrfs /dev/vdd
|
||||
|
||||
|
||||
# create mount points
|
||||
mkdir /app /data /backup
|
||||
|
||||
# add in /etc/fstab
|
||||
UUID=fe1756c7-a062-40ed-921a-9fb1c12d8d51 /app btrfs defaults 0 0
|
||||
UUID=3b147a0d-ca13-46f5-aa75-72f5a2b9fd4c /data btrfs defaults 0 0
|
||||
UUID=d769e88b-5ec4-4e0a-93cd-1f2a9deecc8b /backup btrfs defaults 0 0
|
||||
|
||||
# mount all
|
||||
mount -a
|
||||
|
||||
btrfs subvolume create /backup/current
|
||||
mkdir /backup/.snapshots
|
||||
|
||||
btrfs subvolume snapshot /backup/current /backup/.snapshots/01
|
||||
btrfs subvolume snapshot /backup/current /backup/.snapshots/02
|
||||
|
||||
btrfs subvolume list /backup/current
|
||||
|
||||
btrfs subvolume show /backup/.snapshots/01
|
||||
btrfs subvolume show /backup/.snapshots/02
|
||||
|
||||
tree -a /backup
|
||||
|
||||
btrfs subvolume delete /backup/.snapshots/01
|
||||
btrfs subvolume delete /backup/.snapshots/02
|
||||
btrfs subvolume delete /backup/current
|
||||
|
||||
222
divers/TLS_connection_01.txt
Normal file
222
divers/TLS_connection_01.txt
Normal file
@@ -0,0 +1,222 @@
|
||||
# https://wadhahdaouehi.tn/2023/05/oracle-database-server-client-certificate-tcps-oracle-19c/
|
||||
|
||||
_____ _ _
|
||||
/ ____| (_) | |
|
||||
| (___ ___ _ ____ _____ _ __ ___ _ __| | ___
|
||||
\___ \ / _ \ '__\ \ / / _ \ '__| / __| |/ _` |/ _ \
|
||||
____) | __/ | \ V / __/ | \__ \ | (_| | __/
|
||||
|_____/ \___|_| \_/ \___|_| |___/_|\__,_|\___|
|
||||
|
||||
|
||||
# Create a new auto-login wallet
|
||||
export WALLET_DIRECTORY=/home/oracle/poc_tls/wallet
|
||||
export WALLET_PASSWORD="VaeVictis00!"
|
||||
|
||||
orapki wallet create -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD} -auto_login_local
|
||||
|
||||
# Create a self-signed certificate and load it into the wallet
|
||||
export CERT_VALIDITY_DAYS=3650
|
||||
|
||||
orapki wallet add -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD} -dn "CN=`hostname`" -keysize 2048 -self_signed -validity ${CERT_VALIDITY_DAYS}
|
||||
|
||||
# Check the contents of the wallet
|
||||
orapki wallet display -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD}
|
||||
|
||||
Note: The self-signed certificate is both a user and trusted certificate
|
||||
|
||||
# Export the certificate to load it into the client wallet later
|
||||
export CERT_EXPORT_PATH=/home/oracle/poc_tls/export
|
||||
orapki wallet export -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD} -dn "CN=`hostname`" -cert ${CERT_EXPORT_PATH}/`hostname`-certificate.crt
|
||||
|
||||
|
||||
_____ _ _ _ _ _
|
||||
/ ____| (_) | | (_) | |
|
||||
| | | |_ ___ _ __ | |_ ___ _ __| | ___
|
||||
| | | | |/ _ \ '_ \| __| / __| |/ _` |/ _ \
|
||||
| |____| | | __/ | | | |_ \__ \ | (_| | __/
|
||||
\_____|_|_|\___|_| |_|\__| |___/_|\__,_|\___|
|
||||
|
||||
|
||||
# Create a new auto-login wallet
|
||||
export WALLET_DIRECTORY=/mnt/yavin4/tmp/00000/wayland/wallet
|
||||
export WALLET_PASSWORD="AdVictoriam00!"
|
||||
|
||||
orapki wallet create -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD} -auto_login_local
|
||||
|
||||
# Create a self-signed certificate and load it into the wallet
|
||||
export CERT_VALIDITY_DAYS=3650
|
||||
|
||||
orapki wallet add -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD} -dn "CN=`hostname`" -keysize 2048 -self_signed -validity ${CERT_VALIDITY_DAYS}
|
||||
|
||||
# Check the contents of the wallet
|
||||
orapki wallet display -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD}
|
||||
|
||||
Note: The self-signed certificate is both a user and trusted certificate
|
||||
|
||||
# Export the certificate to load it into the client wallet later
|
||||
export CERT_EXPORT_PATH="/mnt/yavin4/tmp/00000/wayland/cert_expo"
|
||||
orapki wallet export -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD} -dn "CN=`hostname`" -cert ${CERT_EXPORT_PATH}/`hostname`-certificate.crt
|
||||
|
||||
|
||||
_____ _ _ __ _ _ _
|
||||
/ ____| | | (_)/ _(_) | | | |
|
||||
| | ___ _ __| |_ _| |_ _ ___ __ _| |_ ___ _____ _____| |__ __ _ _ __ __ _ ___
|
||||
| | / _ \ '__| __| | _| |/ __/ _` | __/ _ \ / _ \ \/ / __| '_ \ / _` | '_ \ / _` |/ _ \
|
||||
| |___| __/ | | |_| | | | | (_| (_| | || __/ | __/> < (__| | | | (_| | | | | (_| | __/
|
||||
\_____\___|_| \__|_|_| |_|\___\__,_|\__\___| \___/_/\_\___|_| |_|\__,_|_| |_|\__, |\___|
|
||||
__/ |
|
||||
|___/
|
||||
|
||||
Note: Both Server/Client should trust each other
|
||||
|
||||
# Load the client certificate into the server wallet
|
||||
export WALLET_DIRECTORY=/mnt/yavin4/tmp/00000/bakura/wallet
|
||||
export WALLET_PASSWORD="VaeVictis00!"
|
||||
export CERT_EXPORT_FILE="/mnt/yavin4/tmp/00000/wayland/cert_expo/wayland.swgalaxy-certificate.crt"
|
||||
|
||||
orapki wallet add -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD} -trusted_cert -cert ${CERT_EXPORT_FILE}
|
||||
# Check the contents of the wallet
|
||||
orapki wallet display -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD}
|
||||
|
||||
|
||||
# Load the server certificate into the client wallet
|
||||
export WALLET_DIRECTORY=/mnt/yavin4/tmp/00000/wayland/wallet
|
||||
export WALLET_PASSWORD="AdVictoriam00!"
|
||||
export CERT_EXPORT_FILE="/mnt/yavin4/tmp/00000/bakura/cert_expo/bakura.swgalaxy-certificate.crt"
|
||||
|
||||
orapki wallet add -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD} -trusted_cert -cert ${CERT_EXPORT_FILE}
|
||||
# Check the contents of the wallet
|
||||
orapki wallet display -wallet ${WALLET_DIRECTORY} -pwd ${WALLET_PASSWORD}
|
||||
|
||||
|
||||
_ _ _ _
|
||||
| | (_) | | | |
|
||||
| | _ ___| |_ ___ _ __ ___ _ __ ___ ___| |_ _ _ _ __
|
||||
| | | / __| __/ _ \ '_ \ / _ \ '__| / __|/ _ \ __| | | | '_ \
|
||||
| |____| \__ \ || __/ | | | __/ | \__ \ __/ |_| |_| | |_) |
|
||||
|______|_|___/\__\___|_| |_|\___|_| |___/\___|\__|\__,_| .__/
|
||||
| |
|
||||
|_|
|
||||
|
||||
Note: I didn't succeed in setting up the LISTENER using a custom TNS_ADMIN or using the /etc/listener.ora file
|
||||
|
||||
rm -rf /etc/listener.ora
|
||||
rm -rf /etc/tnsnames.ora
|
||||
|
||||
|
||||
# I'm using a read-only ORACLE_HOME
|
||||
cat $(orabasehome)/network/admin/sqlnet.ora
|
||||
|
||||
WALLET_LOCATION =
|
||||
(SOURCE =
|
||||
(METHOD = FILE)
|
||||
(METHOD_DATA =
|
||||
(DIRECTORY = /mnt/yavin4/tmp/00000/bakura/wallet)
|
||||
)
|
||||
)
|
||||
|
||||
SQLNET.AUTHENTICATION_SERVICES = (TCPS,BEQ,NTS)
|
||||
SSL_CLIENT_AUTHENTICATION = FALSE
|
||||
|
||||
|
||||
cat $(orabasehome)/network/admin/listener.ora
|
||||
SSL_CLIENT_AUTHENTICATION = FALSE
|
||||
WALLET_LOCATION =
|
||||
(SOURCE =
|
||||
(METHOD = FILE)
|
||||
(METHOD_DATA =
|
||||
(DIRECTORY = /mnt/yavin4/tmp/00000/bakura/wallet)
|
||||
)
|
||||
)
|
||||
|
||||
LISTENER_DEMO =
|
||||
(DESCRIPTION_LIST =
|
||||
(DESCRIPTION =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = bakura.swgalaxy)(PORT = 1600))
|
||||
)
|
||||
(DESCRIPTION =
|
||||
(ADDRESS = (PROTOCOL = TCPS)(HOST = bakura.swgalaxy)(PORT = 1700))
|
||||
)
|
||||
)
|
||||
|
||||
# start specific listener
|
||||
lsnrctl start LISTENER_DEMO
|
||||
|
||||
# register the database in the listener; note that the TCPS address was not required
|
||||
alter system set local_listener='(DESCRIPTION_LIST = (DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = bakura.swgalaxy)(PORT = 1600)) ) )' scope=both sid='*';
|
||||
alter system register;
|
||||
|
||||
Note: I didn't explicitly specify the TCPS address but TCPS connections will be OK
|
||||
|
||||
_____ _ _ _ _
|
||||
/ ____| (_) | | | |
|
||||
| | | |_ ___ _ __ | |_ ___ ___| |_ _ _ _ __
|
||||
| | | | |/ _ \ '_ \| __| / __|/ _ \ __| | | | '_ \
|
||||
| |____| | | __/ | | | |_ \__ \ __/ |_| |_| | |_) |
|
||||
\_____|_|_|\___|_| |_|\__| |___/\___|\__|\__,_| .__/
|
||||
| |
|
||||
|_|
|
||||
Note: On client side, custom TNS_ADMIN worked
|
||||
|
||||
export TNS_ADMIN=/mnt/yavin4/tmp/00000/wayland/tns_admin
|
||||
|
||||
cd $TNS_ADMIN
|
||||
|
||||
cat sqlnet.ora
|
||||
|
||||
WALLET_LOCATION =
|
||||
(SOURCE =
|
||||
(METHOD = FILE)
|
||||
(METHOD_DATA =
|
||||
(DIRECTORY = /mnt/yavin4/tmp/00000/wayland/wallet)
|
||||
)
|
||||
)
|
||||
|
||||
SQLNET.AUTHENTICATION_SERVICES = (TCPS,BEQ,NTS)
|
||||
SSL_CLIENT_AUTHENTICATION = FALSE
|
||||
|
||||
|
||||
cat tnsnames.ora
|
||||
|
||||
HUTTPRD_tcp =
|
||||
(DESCRIPTION =
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = bakura.swgalaxy)(PORT = 1600))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = HUTTPRD)
|
||||
)
|
||||
)
|
||||
|
||||
HUTTPRD_tcps =
|
||||
(DESCRIPTION =
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCPS)(HOST = bakura.swgalaxy)(PORT = 1700))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = HUTTPRD)
|
||||
)
|
||||
)
|
||||
|
||||
# JABBA is a PDB inside HUTTPRD
|
||||
JABBA_tcps =
|
||||
(DESCRIPTION =
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCPS)(HOST = bakura.swgalaxy)(PORT = 1700))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = JABBA)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
# check connections
|
||||
connect c##globaldba/"secret"@HUTTPRD_tcp
|
||||
connect c##globaldba/"secret"@HUTTPRD_tcps
|
||||
connect c##globaldba/"secret"@JABBA_tcps
|
||||
|
||||
# check for connection protocol: tcp/tcps
|
||||
select SYS_CONTEXT('USERENV','NETWORK_PROTOCOL') from dual;
|
||||
93
divers/ash_plsql_01.txt
Normal file
93
divers/ash_plsql_01.txt
Normal file
@@ -0,0 +1,93 @@
|
||||
connect user1/secret@//bakura.swgalaxy:1521/WOMBAT
|
||||
|
||||
create table tpl1 as select * from dba_extents;
|
||||
create table tpl2 as (select * from tpl1 union all select * from tpl1);
|
||||
create table tpl3 as (select * from tpl2 union all select * from tpl2);
|
||||
|
||||
|
||||
select /* MYQ1 */
|
||||
count(*)
|
||||
from
|
||||
tpl1
|
||||
join tpl2 on tpl1.bytes=tpl2.bytes
|
||||
join tpl3 on tpl1.segment_name=tpl3.segment_name
|
||||
/
|
||||
|
||||
|
||||
--------------------------------------------------------
|
||||
-- DDL for Package PACKAGE1
|
||||
--------------------------------------------------------
|
||||
|
||||
CREATE OR REPLACE EDITIONABLE PACKAGE "USER1"."PACKAGE1" AS
|
||||
|
||||
PROCEDURE PROC1;
|
||||
PROCEDURE PROC2;
|
||||
PROCEDURE PROC3;
|
||||
|
||||
END PACKAGE1;
|
||||
|
||||
/
|
||||
|
||||
|
||||
--------------------------------------------------------
|
||||
-- DDL for Package Body PACKAGE1
|
||||
--------------------------------------------------------
|
||||
|
||||
CREATE OR REPLACE EDITIONABLE PACKAGE BODY "USER1"."PACKAGE1" AS
|
||||
|
||||
PROCEDURE proc1 AS
|
||||
rr NUMBER;
|
||||
BEGIN
|
||||
SELECT /* MYQ2 */
|
||||
COUNT(*)
|
||||
INTO rr
|
||||
FROM
|
||||
tpl1
|
||||
JOIN tpl2 ON tpl1.bytes = tpl2.bytes
|
||||
JOIN tpl3 ON tpl1.segment_name = tpl3.segment_name;
|
||||
|
||||
END;
|
||||
|
||||
PROCEDURE proc2 AS
|
||||
z NUMBER;
|
||||
BEGIN
|
||||
SELECT /* MYQ3 */
|
||||
COUNT(*)
|
||||
INTO z
|
||||
FROM
|
||||
tpl1
|
||||
JOIN tpl2 ON tpl1.bytes = tpl2.bytes
|
||||
JOIN tpl3 ON tpl1.segment_name = tpl3.segment_name;
|
||||
|
||||
END;
|
||||
|
||||
|
||||
PROCEDURE proc3 AS
|
||||
v NUMBER;
|
||||
BEGIN
|
||||
SELECT /* MYQ4 */
|
||||
COUNT(*)
|
||||
INTO v
|
||||
FROM
|
||||
tpl1
|
||||
JOIN tpl2 ON tpl1.bytes = tpl2.bytes
|
||||
JOIN tpl3 ON tpl1.segment_name = tpl3.segment_name;
|
||||
|
||||
END;
|
||||
|
||||
|
||||
END package1;
|
||||
|
||||
/
|
||||
|
||||
|
||||
SQL> @ash/ashtop sql_id,TOP_LEVEL_SQL_ID,PLSQL_ENTRY_OBJECT_ID,PLSQL_ENTRY_SUBPROGRAM_ID "username='USER1'" sysdate-1/24 sysdate
|
||||
|
||||
Total Distinct Distinct
|
||||
Seconds AAS %This SQL_ID TOP_LEVEL_SQL PLSQL_ENTRY_OBJECT_ID PLSQL_ENTRY_SUBPROGRAM_ID FIRST_SEEN LAST_SEEN Execs Seen Tstamps
|
||||
--------- ------- ------- ------------- ------------- --------------------- ------------------------- ------------------- ------------------- ---------- --------
|
||||
105 .0 41% | a0dhc0nj62mk1 8ybf2rvtac57c 33008 3 2023-07-19 20:45:23 2023-07-19 20:47:07 1 105
|
||||
104 .0 41% | a0dhc0nj62mk1 25ju18ztqn751 33008 1 2023-07-19 20:34:23 2023-07-19 20:36:06 1 104
|
||||
42 .0 16% | a0dhc0nj62mk1 cum98j5xfkk62 33008 2 2023-07-19 20:44:37 2023-07-19 20:45:18 1 42
|
||||
|
||||
|
||||
8
divers/certbot_renew_01.txt
Normal file
8
divers/certbot_renew_01.txt
Normal file
@@ -0,0 +1,8 @@
|
||||
certbot certonly --webroot --webroot-path /app/persistent_docker/nginx/www/memo.dbaoracle.fr -d memo.dbaoracle.fr
|
||||
certbot certonly --webroot --webroot-path /app/persistent_docker/nginx/www/support.dbaoracle.fr -d support.dbaoracle.fr
|
||||
certbot certonly --webroot --webroot-path /app/persistent_docker/nginx/www/public.dbaoracle.fr -d public.dbaoracle.fr
|
||||
|
||||
certbot certonly --webroot --webroot-path /app/persistent_docker/nginx/www/sabnzbd.dbaoracle.fr -d sabnzbd.dbaoracle.fr
|
||||
certbot certonly --webroot --webroot-path /app/persistent_docker/nginx/www/lidarr.dbaoracle.fr -d lidarr.dbaoracle.fr
|
||||
certbot certonly --webroot --webroot-path /app/persistent_docker/nginx/www/sonarr.dbaoracle.fr -d sonarr.dbaoracle.fr
|
||||
certbot certonly --webroot --webroot-path /app/persistent_docker/nginx/www/radarr.dbaoracle.fr -d radarr.dbaoracle.fr
|
||||
88
divers/clone_oracle_home_golden_image_01.txt
Normal file
88
divers/clone_oracle_home_golden_image_01.txt
Normal file
@@ -0,0 +1,88 @@
|
||||
-- https://rene-ace.com/how-to-clone-an-oracle-home-in-19c/
|
||||
-----------------------------------------------------------
|
||||
|
||||
cd $ORACLE_HOME/rdbms/lib/
|
||||
cat config.c | grep define
|
||||
|
||||
---------------------------->
|
||||
#define SS_DBA_GRP "dba"
|
||||
#define SS_OPER_GRP "oper"
|
||||
#define SS_ASM_GRP ""
|
||||
#define SS_BKP_GRP "backupdba"
|
||||
#define SS_DGD_GRP "dgdba"
|
||||
#define SS_KMT_GRP "kmdba"
|
||||
#define SS_RAC_GRP "racdba"
|
||||
<----------------------------
|
||||
|
||||
$ORACLE_HOME/runInstaller -silent -createGoldImage -destinationLocation /app/oracle/staging_area
|
||||
|
||||
cd /app/oracle/staging_area
|
||||
unzip -v db_home_2023-08-16_02-20-39PM.zip
|
||||
|
||||
mkdir -p /app/oracle/product/19.20
|
||||
cd /app/oracle/product/19.20
|
||||
unzip /app/oracle/staging_area/db_home_2023-08-16_02-20-39PM.zip
|
||||
|
||||
unset ORACLE_HOME ORACLE_SID ORACLE_RSID ORACLE_UNQNAME ORACLE_BASE
|
||||
|
||||
export ORACLE_HOME=/app/oracle/product/19.20
|
||||
export ORACLE_HOSTNAME=togoria
|
||||
export ORA_INVENTORY=/app/oracle/oraInventory
|
||||
export NODE1_HOSTNAME=togoria
|
||||
# export NODE2_HOSTNAME=reneace02
|
||||
export ORACLE_BASE=/app/oracle/base
|
||||
|
||||
|
||||
# current
|
||||
# required only if OS is OEL8
|
||||
export CV_ASSUME_DISTID=OEL7.8
|
||||
|
||||
${ORACLE_HOME}/runInstaller -ignorePrereq -waitforcompletion -silent \
|
||||
-responseFile ${ORACLE_HOME}/install/response/db_install.rsp \
|
||||
oracle.install.option=INSTALL_DB_SWONLY \
|
||||
ORACLE_HOSTNAME=${ORACLE_HOSTNAME} \
|
||||
UNIX_GROUP_NAME=oinstall \
|
||||
INVENTORY_LOCATION=${ORA_INVENTORY} \
|
||||
ORACLE_HOME=${ORACLE_HOME} \
|
||||
ORACLE_BASE=${ORACLE_BASE} \
|
||||
oracle.install.db.OSDBA_GROUP=dba \
|
||||
oracle.install.db.OSOPER_GROUP=oper \
|
||||
oracle.install.db.OSBACKUPDBA_GROUP=backupdba \
|
||||
oracle.install.db.OSDGDBA_GROUP=dgdba \
|
||||
oracle.install.db.OSKMDBA_GROUP=kmdba \
|
||||
oracle.install.db.OSRACDBA_GROUP=racdba
|
||||
|
||||
|
||||
# original
|
||||
${ORACLE_HOME}/runInstaller -ignorePrereq -waitforcompletion -silent \
|
||||
-responseFile ${ORACLE_HOME}/install/response/db_install.rsp \
|
||||
oracle.install.option=INSTALL_DB_SWONLY \
|
||||
ORACLE_HOSTNAME=${ORACLE_HOSTNAME} \
|
||||
UNIX_GROUP_NAME=oinstall \
|
||||
INVENTORY_LOCATION=${ORA_INVENTORY} \
|
||||
SELECTED_LANGUAGES=en \
|
||||
ORACLE_HOME=${ORACLE_HOME} \
|
||||
ORACLE_BASE=${ORACLE_BASE} \
|
||||
oracle.install.db.InstallEdition=EE \
|
||||
oracle.install.db.OSDBA_GROUP=dba \
|
||||
oracle.install.db.OSOPER_GROUP=dba \
|
||||
oracle.install.db.OSBACKUPDBA_GROUP=dba \
|
||||
oracle.install.db.OSDGDBA_GROUP=dba \
|
||||
oracle.install.db.OSKMDBA_GROUP=dba \
|
||||
oracle.install.db.OSRACDBA_GROUP=dba \
|
||||
oracle.install.db.CLUSTER_NODES=${NODE1_HOSTNAME},${NODE2_HOSTNAME} \
|
||||
oracle.install.db.isRACOneInstall=false \
|
||||
oracle.install.db.rac.serverpoolCardinality=0 \
|
||||
oracle.install.db.config.starterdb.type=GENERAL_PURPOSE \
|
||||
oracle.install.db.ConfigureAsContainerDB=false \
|
||||
SECURITY_UPDATES_VIA_MYORACLESUPPORT=false \
|
||||
DECLINE_SECURITY_UPDATES=true
|
||||
|
||||
|
||||
# check ORACLE homes in inventory
|
||||
cat /app/oracle/oraInventory/ContentsXML/inventory.xml | grep "HOME NAME"
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
115
divers/dataguard_21_RAC_01.txt
Normal file
115
divers/dataguard_21_RAC_01.txt
Normal file
@@ -0,0 +1,115 @@
|
||||
rman target /
|
||||
|
||||
run
|
||||
{
|
||||
set nocfau;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/RAC/21/backupset/%d_%U_%s_%t.bck';
|
||||
allocate channel ch02 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/RAC/21/backupset/%d_%U_%s_%t.bck';
|
||||
allocate channel ch03 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/RAC/21/backupset/%d_%U_%s_%t.bck';
|
||||
allocate channel ch04 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/RAC/21/backupset/%d_%U_%s_%t.bck';
|
||||
backup as compressed backupset incremental level 0 database section size 2G include current controlfile plus archivelog delete input;
|
||||
release channel ch01;
|
||||
release channel ch02;
|
||||
release channel ch03;
|
||||
release channel ch04;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/RAC/21/backupset/%d_%U_%s_%t.controlfile';
|
||||
backup current controlfile;
|
||||
release channel ch01;
|
||||
}
|
||||
|
||||
srvctl add database -d HUTTPRD -o /app/oracle/product/19 -p '+DATA/HUTTPRD/spfile.ora'
|
||||
|
||||
~~ create passwordfile on ASM; if the DB is not yet registered on CRS, you will get a WARNING
|
||||
orapwd FILE='+DATA/HUTTPRD/orapwHUTTPRD' ENTRIES=10 DBUNIQUENAME='HUTTPRD' password="Secret00!"
|
||||
srvctl modify database -d HUTTPRD -pwfile '+DATA/HUTTPRD/orapwHUTTPRD'
|
||||
|
||||
srvctl add instance -d HUTTPRD -i HUTTPRD1 -n ylesia-db01
|
||||
srvctl add instance -d HUTTPRD -i HUTTPRD2 -n ylesia-db02
|
||||
|
||||
|
||||
alias HUTTPRD='rlwrap sqlplus sys/"Secret00!"@ylesia-scan/HUTTPRD as sysdba'
|
||||
alias HUTTDRP='rlwrap sqlplus sys/"Secret00!"@rodia-scan/HUTTDRP as sysdba'
|
||||
|
||||
|
||||
run
|
||||
{
|
||||
allocate auxiliary channel aux01 device type disk;
|
||||
allocate auxiliary channel aux02 device type disk;
|
||||
allocate auxiliary channel aux03 device type disk;
|
||||
allocate auxiliary channel aux04 device type disk;
|
||||
duplicate database 'HUTT' for standby backup location '/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/RAC/21/backupset/';
|
||||
}
|
||||
|
||||
|
||||
srvctl add database -d HUTTDRP -o /app/oracle/product/21 -p '+DATA/HUTTDRP/spfile.ora'
|
||||
srvctl modify database -d HUTTDRP -r physical_standby -n HUTT -s MOUNT
|
||||
|
||||
srvctl add instance -d HUTTDRP -i HUTTDRP1 -n rodia-db01
|
||||
srvctl add instance -d HUTTDRP -i HUTTDRP2 -n rodia-db02
|
||||
|
||||
|
||||
# copy passwordfile from primary to standby
|
||||
ASMCMD [+DATA/HUTTPRD] > pwcopy +DATA/HUTTPRD/PASSWORD/pwdhuttprd.274.1137773649 /tmp
|
||||
scp /tmp/pwdhuttprd.274.1137773649 rodia-db02:/tmp
|
||||
ASMCMD [+DATA/HUTTDRP] > pwcopy /tmp/pwdhuttprd.274.1137773649 +DATA/HUTTDRP/orapwhuttdrp
|
||||
|
||||
srvctl modify database -db HUTTDRP -pwfile '+DATA/HUTTDRP/orapwhuttdrp'
|
||||
|
||||
|
||||
alter system set dg_broker_config_file1='+DATA/HUTTPRD/dg_broker_01.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='+DATA/HUTTPRD/dg_broker_02.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
alter system set dg_broker_config_file1='+DATA/HUTTDRP/dg_broker_01.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='+DATA/HUTTDRP/dg_broker_02.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
|
||||
select GROUP#,THREAD#,MEMBERS,STATUS, BYTES/(1024*1024) Mb from v$log;
|
||||
select GROUP#,THREAD#,STATUS, BYTES/(1024*1024) Mb from v$standby_log;
|
||||
|
||||
set lines 256
|
||||
col MEMBER for a80
|
||||
select * from v$logfile;
|
||||
|
||||
|
||||
-- create standby redologs
|
||||
select 'ALTER DATABASE ADD STANDBY LOGFILE THREAD '||thread#||' size '||bytes||';' from v$log;
|
||||
select distinct 'ALTER DATABASE ADD STANDBY LOGFILE THREAD '||thread#||' size '||bytes||';' from v$log;
|
||||
|
||||
-- clear / drop standby redologs
|
||||
select 'ALTER DATABASE CLEAR LOGFILE GROUP '||GROUP#||';' from v$standby_log;
|
||||
select 'ALTER DATABASE DROP STANDBY LOGFILE GROUP '||GROUP#||';' from v$standby_log;
|
||||
|
||||
|
||||
dgmgrl
|
||||
DGMGRL> connect sys/"Secret00!"@ylesia-scan:1521/HUTTPRD
|
||||
DGMGRL> create configuration HUTT as primary database is HUTTPRD connect identifier is ylesia-scan:1521/HUTTPRD;
|
||||
DGMGRL> add database HUTTDRP as connect identifier is rodia-scan:1521/HUTTDRP;
|
||||
|
||||
DGMGRL> enable configuration;
|
||||
DGMGRL> show configuration;
|
||||
|
||||
DGMGRL> edit database 'huttdrp' set property ArchiveLagTarget=0;
|
||||
DGMGRL> edit database 'huttdrp' set property LogArchiveMaxProcesses=2;
|
||||
DGMGRL> edit database 'huttdrp' set property LogArchiveMinSucceedDest=1;
|
||||
DGMGRL> edit database 'huttdrp' set property StandbyFileManagement='AUTO';
|
||||
|
||||
DGMGRL> edit database 'huttprd' set property ArchiveLagTarget=0;
|
||||
DGMGRL> edit database 'huttprd' set property LogArchiveMaxProcesses=2;
|
||||
DGMGRL> edit database 'huttprd' set property LogArchiveMinSucceedDest=1;
|
||||
DGMGRL> edit database 'huttprd' set property StandbyFileManagement='AUTO';
|
||||
|
||||
DGMGRL> show configuration;
|
||||
|
||||
|
||||
|
||||
RMAN> configure archivelog deletion policy to applied on all standby;
|
||||
|
||||
# if incremental recover from source is required
|
||||
RMAN> recover database from service 'ylesia-scan/HUTTPRD' using compressed backupset section size 2G;
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
162
divers/dataguard_cascade_routes_01.txt
Normal file
162
divers/dataguard_cascade_routes_01.txt
Normal file
@@ -0,0 +1,162 @@
|
||||
Primary: ylesia-scan:1521/HUTTPRD
|
||||
Dataguard: rodia-scan:1521/HUTTDRP
|
||||
Cascade 1: kamino:1521/HUTTCA1
|
||||
Far sync: mandalore:1521/HUTTFAR
|
||||
Remote dataguard: taris:1521/HUTTREM
|
||||
|
||||
alias HUTTPRD='rlwrap sqlplus sys/"Secret00!"@ylesia-scan:1521/HUTTPRD as sysdba'
|
||||
alias HUTTPRD1='rlwrap sqlplus sys/"Secret00!"@ylesia-db01-vip:1521/HUTTPRD as sysdba'
|
||||
alias HUTTPRD2='rlwrap sqlplus sys/"Secret00!"@ylesia-db02-vip:1521/HUTTPRD as sysdba'
|
||||
alias HUTTDRP='rlwrap sqlplus sys/"Secret00!"@rodia-scan:1521/HUTTDRP as sysdba'
|
||||
alias HUTTCA1='rlwrap sqlplus sys/"Secret00!"@kamino:1521/HUTTCA1 as sysdba'
|
||||
alias HUTTFAR='rlwrap sqlplus sys/"Secret00!"@mandalore:1521/HUTTFAR as sysdba'
|
||||
alias HUTTREM='rlwrap sqlplus sys/"Secret00!"@taris:1521/HUTTREM as sysdba'
|
||||
|
||||
|
||||
run
|
||||
{
|
||||
allocate auxiliary channel aux01 device type disk;
|
||||
allocate auxiliary channel aux02 device type disk;
|
||||
allocate auxiliary channel aux03 device type disk;
|
||||
allocate auxiliary channel aux04 device type disk;
|
||||
duplicate database 'HUTT' for standby backup location '/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/RAC/21/backupset/';
|
||||
}
|
||||
|
||||
|
||||
run
|
||||
{
|
||||
allocate channel pri01 device type disk;
|
||||
allocate channel pri02 device type disk;
|
||||
allocate channel pri03 device type disk;
|
||||
allocate channel pri04 device type disk;
|
||||
recover database from service 'ylesia-scan:1521/HUTTPRD' using compressed backupset section size 1G;
|
||||
}
|
||||
|
||||
alter database create standby controlfile as '/mnt/yavin4/tmp/00000/HUTTPRD1.stdby';
|
||||
alter database create far sync instance controlfile as '/mnt/yavin4/tmp/00000/HUTTPRD1.far';
|
||||
|
||||
dgmgrl
|
||||
DGMGRL> connect sys/"Secret00!"@ylesia-scan:1521/HUTTPRD
|
||||
DGMGRL> add database HUTTCA1 as connect identifier is kamino:1521/HUTTCA1;
|
||||
DGMGRL> add database HUTTREM as connect identifier is taris:1521/HUTTREM
|
||||
DGMGRL> add far_sync HUTTFAR as connect identifier is mandalore:1521/HUTTFAR;
|
||||
|
||||
DGMGRL> show database 'huttprd' redoroutes;
|
||||
DGMGRL> show database 'huttdrp' redoroutes;
|
||||
|
||||
# routes config ###########################################################################
|
||||
|
||||
# without FAR SYNC: main dataguard relays redo to the cascade
|
||||
DGMGRL> edit database huttprd set property redoroutes = '(local:huttdrp)(huttdrp:huttca1)';
|
||||
DGMGRL> edit database huttdrp set property redoroutes = '(huttprd:huttca1)(local:huttprd)';
|
||||
|
||||
# FAR SYNC built but not activated: main dataguard relays redo to the cascade and the remote dataguard
|
||||
DGMGRL> edit database huttprd set property redoroutes = '(local:huttdrp)(huttdrp:huttca1)';
|
||||
DGMGRL> edit database huttdrp set property redoroutes = '(huttprd:huttca1,huttrem)(local:huttprd)';
|
||||
|
||||
# FAR SYNC activated: main dataguard relays redo to the cascade and FAR SYNC relays redo to the remote dataguard
|
||||
DGMGRL> edit database huttprd set property redoroutes = '(local:huttdrp,huttfar SYNC)(huttdrp:huttca1 ASYNC)';
|
||||
DGMGRL> edit database huttdrp set property redoroutes = '(huttprd:huttca1 ASYNC)(local:huttprd,huttfar SYNC)';
|
||||
DGMGRL> edit far_sync huttfar set property redoroutes = '(huttprd:huttrem ASYNC)(huttdrp:huttrem ASYNC)';
|
||||
|
||||
# #########################################################################################
|
||||
|
||||
|
||||
DGMGRL> edit database huttprd set property StandbyFileManagement='AUTO';
|
||||
DGMGRL> edit database huttdrp set property StandbyFileManagement='AUTO';
|
||||
DGMGRL> edit database huttca1 set property StandbyFileManagement='AUTO';
|
||||
DGMGRL> edit database huttrem set property StandbyFileManagement='AUTO';
|
||||
DGMGRL> edit far_sync huttfar set property StandbyFileManagement='AUTO';
|
||||
|
||||
# unless setting configuration protection to MaxAvailability, cascade standby redologs were not used and the broker showed warnings
|
||||
# after setting to MaxAvailability, switching back to MaxPerformance does not affect the situation: the cascade standby still uses
|
||||
# standby redologs and broker status does not display warnings anymore
|
||||
|
||||
DGMGRL> edit configuration set protection mode as MaxAvailability;
|
||||
DGMGRL> edit configuration set protection mode as MaxPerformance;
|
||||
|
||||
|
||||
# not sure that this helps with
|
||||
# ORA-16853: apply lag has exceeded specified threshold
|
||||
# ORA-16855: transport lag has exceeded specified threshold
|
||||
|
||||
DGMGRL> edit database huttprd set property TransportDisconnectedThreshold=0;
|
||||
DGMGRL> edit database huttdrp set property TransportDisconnectedThreshold=0;
|
||||
DGMGRL> edit database huttca1 set property TransportDisconnectedThreshold=0;
|
||||
|
||||
DGMGRL> edit database huttprd set property ApplyLagThreshold=0;
|
||||
DGMGRL> edit database huttdrp set property ApplyLagThreshold=0;
|
||||
DGMGRL> edit database huttca1 set property ApplyLagThreshold=0;
|
||||
|
||||
# otherwise, to reset:
|
||||
|
||||
DGMGRL> edit database huttprd reset property ApplyLagThreshold;
|
||||
DGMGRL> edit database huttdrp reset property ApplyLagThreshold;
|
||||
DGMGRL> edit database huttca1 reset property ApplyLagThreshold;
|
||||
|
||||
DGMGRL> edit database huttprd reset property TransportDisconnectedThreshold;
|
||||
DGMGRL> edit database huttdrp reset property TransportDisconnectedThreshold;
|
||||
DGMGRL> edit database huttca1 reset property TransportDisconnectedThreshold;
|
||||
|
||||
|
||||
DGMGRL> enable database huttca1;
|
||||
DGMGRL> edit database huttca1 set state='APPLY-OFF';
|
||||
DGMGRL> edit database huttca1 set state='ONLINE';
|
||||
|
||||
-- create standby redologs
|
||||
select 'ALTER DATABASE ADD STANDBY LOGFILE THREAD '||thread#||' size '||bytes||';' from v$log union all
|
||||
select distinct 'ALTER DATABASE ADD STANDBY LOGFILE THREAD '||thread#||' size '||bytes||';' from v$log;
|
||||
|
||||
-- clear / drop standby redologs
|
||||
select 'ALTER DATABASE CLEAR LOGFILE GROUP '||GROUP#||';' from v$standby_log;
|
||||
select 'ALTER DATABASE DROP STANDBY LOGFILE GROUP '||GROUP#||';' from v$standby_log;
|
||||
|
||||
|
||||
alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss';
|
||||
set lines 200
|
||||
|
||||
-- on PRIMARY database
|
||||
----------------------
|
||||
select THREAD#, max(SEQUENCE#), max(FIRST_TIME),max(NEXT_TIME),max(COMPLETION_TIME) from gv$archived_log group by THREAD#;
|
||||
|
||||
-- on STANDBY database
|
||||
----------------------
|
||||
select THREAD#, max(SEQUENCE#), max(FIRST_TIME),max(NEXT_TIME),max(COMPLETION_TIME) from gv$archived_log
|
||||
where APPLIED='YES' group by THREAD#;
|
||||
|
||||
|
||||
set lines 155 pages 9999
|
||||
col thread# for 9999990
|
||||
col sequence# for 999999990
|
||||
col grp for 990
|
||||
col fnm for a50 head "File Name"
|
||||
col "First SCN Number" for 999999999999990
|
||||
break on thread
|
||||
|
||||
select
|
||||
a.thread#
|
||||
,a.sequence#
|
||||
,a.group# grp
|
||||
, a.bytes/1024/1024 Size_MB
|
||||
,a.status
|
||||
,a.archived
|
||||
,a.first_change# "First SCN Number"
|
||||
,to_char(FIRST_TIME,'YYYY-MM-DD HH24:MI:SS') "First SCN Time"
|
||||
,to_char(LAST_TIME,'YYYY-MM-DD HH24:MI:SS') "Last SCN Time"
|
||||
from
|
||||
gv$standby_log a order by 1,2,3,4
|
||||
/
|
||||
|
||||
|
||||
|
||||
# https://www.dba-scripts.com/articles/dataguard-standby/data-guard-far-sync/
|
||||
|
||||
|
||||
edit database huttdrp set property redoroutes = '(huttprd:huttca1)(huttprd:huttrem)(local:huttprd)';
|
||||
enable database huttrem;
|
||||
|
||||
|
||||
|
||||
|
||||
create pluggable database JABBA admin user admin identified by "Secret00!";
|
||||
|
||||
11
divers/dg.txt
Normal file
11
divers/dg.txt
Normal file
@@ -0,0 +1,11 @@
|
||||
alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss';
|
||||
set lines 200
|
||||
|
||||
-- on PRIMARY database
|
||||
----------------------
|
||||
select THREAD#, max(SEQUENCE#), max(FIRST_TIME),max(NEXT_TIME),max(COMPLETION_TIME) from gv$archived_log group by THREAD#;
|
||||
|
||||
-- on STANDBY database
|
||||
----------------------
|
||||
select THREAD#, max(SEQUENCE#), max(FIRST_TIME),max(NEXT_TIME),max(COMPLETION_TIME) from gv$archived_log
|
||||
where APPLIED='YES' group by THREAD#;
|
||||
17
divers/disable_IPV6.md
Normal file
17
divers/disable_IPV6.md
Normal file
@@ -0,0 +1,17 @@
|
||||
Create a sysctl config file:
|
||||
```bash
|
||||
tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <<'EOF'
|
||||
net.ipv6.conf.all.disable_ipv6 = 1
|
||||
net.ipv6.conf.default.disable_ipv6 = 1
|
||||
EOF
|
||||
```
|
||||
|
||||
Apply the settings:
|
||||
```bash
|
||||
sudo sysctl --system
|
||||
```
|
||||
|
||||
Verify:
|
||||
```bash
|
||||
cat /proc/sys/net/ipv6/conf/all/disable_ipv6
|
||||
```
|
||||
9
divers/dnsmanager_api_example_01.txt
Normal file
9
divers/dnsmanager_api_example_01.txt
Normal file
@@ -0,0 +1,9 @@
|
||||
curl -s https://app.dnsmanager.io/api/v1/user/domains \
|
||||
-u "9422ac9d-2c62-4967-ae12-c1d15bbbe200:I9HV2Jqp1gFqMuic3zPRYW5guSQEvoyy" | jq
|
||||
|
||||
curl -s https://app.dnsmanager.io/api/v1/user/domain/151914/records \
|
||||
-u "9422ac9d-2c62-4967-ae12-c1d15bbbe200:I9HV2Jqp1gFqMuic3zPRYW5guSQEvoyy" | jq
|
||||
|
||||
curl -s -X PUT -d content="1.1.1.1" https://app.dnsmanager.io/api/v1/user/domain/151914/record/16572810 \
|
||||
-u "9422ac9d-2c62-4967-ae12-c1d15bbbe200:I9HV2Jqp1gFqMuic3zPRYW5guSQEvoyy" | jq
|
||||
|
||||
19
divers/import_certificate_RHEL9.md
Normal file
19
divers/import_certificate_RHEL9.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# How to Import Your Own CA Root on RHEL 9
|
||||
|
||||
## Place your CA certificate in the correct directory
|
||||
|
||||
```bash
|
||||
cp /mnt/unprotected/tmp/oracle/swgalaxy_root_ca.cert.pem /etc/pki/ca-trust/source/anchors/
|
||||
```
|
||||
|
||||
## Update the system trust store
|
||||
|
||||
```bash
|
||||
update-ca-trust extract
|
||||
```
|
||||
|
||||
## Verify that your CA is now trusted
|
||||
|
||||
```bash
|
||||
openssl verify -CAfile /etc/pki/tls/certs/ca-bundle.crt /etc/pki/ca-trust/source/anchors/swgalaxy_root_ca.cert.pem
|
||||
```
|
||||
8
divers/issue_after_swap_lv_destroy_01.txt
Normal file
8
divers/issue_after_swap_lv_destroy_01.txt
Normal file
@@ -0,0 +1,8 @@
|
||||
# after destroying a SWAP LV to create a new one, an old reference remains in /etc/default/grub
|
||||
# in GRUB_CMDLINE_LINUX
|
||||
|
||||
# remove the old swap reference from GRUB_CMDLINE_LINUX in /etc/default/grub
|
||||
vi /etc/default/grub
|
||||
grub2-mkconfig -o /boot/grub2/grub.cfg
|
||||
|
||||
# restart the machine
|
||||
5
divers/linux_change_machine_id.md
Normal file
5
divers/linux_change_machine_id.md
Normal file
@@ -0,0 +1,5 @@
|
||||
Commands to generate a new machine ID:
|
||||
```bash
|
||||
cat /dev/null > /etc/machine-id
|
||||
systemd-machine-id-setup
|
||||
```
|
||||
27
divers/linux_cleanup_boot_partition.txt
Normal file
27
divers/linux_cleanup_boot_partition.txt
Normal file
@@ -0,0 +1,27 @@
|
||||
@ Technical Tip: Clean up /boot in CentOS, RHEL or Rocky Linux 8 and up
|
||||
|
||||
1) Check the current kernel being used:
|
||||
|
||||
|
||||
sudo uname -sr
|
||||
|
||||
|
||||
2) List all kernels installed on the system:
|
||||
|
||||
|
||||
sudo rpm -q kernel
|
||||
|
||||
|
||||
3) Delete old kernels and only leave <X> number of kernels:
|
||||
|
||||
|
||||
sudo dnf remove --oldinstallonly --setopt installonly_limit=<X> kernel
|
||||
|
||||
|
||||
Note: <X> can be set to 1, 2, 3 or other numeric values. Carefully check the running kernel in step 2 and any other kernels used before running this command. Alternatively, use the following command to delete kernels one by one:
|
||||
|
||||
|
||||
rpm -e <kernel_name>
|
||||
|
||||
Kernel names can be obtained through step 2.
|
||||
|
||||
26
divers/linux_create_swap_partition_01.txt
Normal file
26
divers/linux_create_swap_partition_01.txt
Normal file
@@ -0,0 +1,26 @@
|
||||
# create swap partition on /dev/vdb
|
||||
###################################
|
||||
|
||||
# create PV,VG and LV
|
||||
lsblk
|
||||
fdisk /dev/vdb
|
||||
pvs
|
||||
pvcreate /dev/vdb1
|
||||
vgcreate vgswap /dev/vdb1
|
||||
vgs
|
||||
lvs
|
||||
lvcreate -n swap -l 100%FREE vgswap
|
||||
ls /dev/mapper/vgswap-swap
|
||||
|
||||
# format LV as swap
|
||||
mkswap /dev/mapper/vgswap-swap
|
||||
|
||||
# add swap entry in /etc/fstab
|
||||
/dev/mapper/vgswap-swap swap swap defaults 0 0
|
||||
|
||||
# activate swap
|
||||
swapon -va
|
||||
|
||||
# check swap
|
||||
cat /proc/swaps
|
||||
free -h
|
||||
6
divers/linux_remove_old_kernel_01.txt
Normal file
6
divers/linux_remove_old_kernel_01.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
# remove old kernel from /boot
|
||||
# https://community.fortinet.com/t5/FortiSOAR-Knowledge-Base/Technical-Tip-Clean-up-boot-in-CentOS-RHEL-or-Rocky-Linux-8-and/ta-p/257565
|
||||
|
||||
uname -sr
|
||||
rpm -q kernel
|
||||
dnf remove --oldinstallonly --setopt installonly_limit=2 kernel
|
||||
96
divers/my_root_CA_generate_certificate.md
Normal file
96
divers/my_root_CA_generate_certificate.md
Normal file
@@ -0,0 +1,96 @@
|
||||
# Issue a Server Certificate
|
||||
|
||||
> Based on https://medium.com/@sureshchand.rhce/how-to-build-a-root-ca-intermediate-ca-with-openssl-eba1c73d1591
|
||||
|
||||
## Create server key
|
||||
``` bash
|
||||
openssl genpkey -algorithm RSA \
|
||||
-out exegol.swgalaxy.key.pem \
|
||||
-pkeyopt rsa_keygen_bits:2048
|
||||
```
|
||||
|
||||
## Create CSR with SAN
|
||||
|
||||
Define a configuration file for the CSR `exegol.swgalaxy.cnf`:
|
||||
```
|
||||
[ req ]
|
||||
distinguished_name = req_distinguished_name
|
||||
req_extensions = req_ext
|
||||
prompt = no
|
||||
|
||||
[ req_distinguished_name ]
|
||||
C = FR
|
||||
ST = Yvelines
|
||||
L = Le Vesinet
|
||||
O = swgalaxy
|
||||
OU = swgalaxy servers
|
||||
CN = exegol.swgalaxy
|
||||
|
||||
[ req_ext ]
|
||||
subjectAltName = @alt_names
|
||||
|
||||
[ alt_names ]
|
||||
DNS.1 = exegol.swgalaxy
|
||||
DNS.2 = exegol
|
||||
```
|
||||
|
||||
Create the CSR:
|
||||
|
||||
``` bash
|
||||
openssl req -new -key exegol.swgalaxy.key.pem \
|
||||
-out exegol.swgalaxy.csr.pem \
|
||||
-config exegol.swgalaxy.cnf
|
||||
```
|
||||
|
||||
|
||||
## Sign with Intermediate CA
|
||||
|
||||
Update `server_cert` extension on **intermediate CA** configuration file `/app/pki/intermediate/openssl.cnf`:
|
||||
```
|
||||
[ server_cert ]
|
||||
# Basic identity
|
||||
subjectKeyIdentifier = hash
|
||||
authorityKeyIdentifier = keyid,issuer
|
||||
|
||||
# Server certificates must NOT be CA certificates
|
||||
basicConstraints = critical, CA:FALSE
|
||||
|
||||
# Key usage: what the certificate is allowed to do
|
||||
keyUsage = critical, digitalSignature, keyEncipherment
|
||||
|
||||
# Extended key usage: define this as a TLS server certificate
|
||||
extendedKeyUsage = serverAuth
|
||||
|
||||
# Allow SANs (modern TLS requires SANs)
|
||||
subjectAltName = @alt_names
|
||||
|
||||
[ alt_names ]
|
||||
DNS.1 = exegol.swgalaxy
|
||||
DNS.2 = exegol
|
||||
```
|
||||
|
||||
Sign the certificate with **intermediate CA**:
|
||||
|
||||
``` bash
|
||||
openssl ca -config /app/pki/intermediate/openssl.cnf \
|
||||
-extensions server_cert \
|
||||
-days 3650 -notext -md sha256 \
|
||||
-in exegol.swgalaxy.csr.pem \
|
||||
-out /app/pki/intermediate/certs/exegol.swgalaxy.cert.pem
|
||||
```
|
||||
|
||||
## Verify the chain
|
||||
|
||||
``` bash
|
||||
openssl verify \
|
||||
-CAfile /app/pki/intermediate/certs/ca-chain.cert.pem \
|
||||
/app/pki/intermediate/certs/exegol.swgalaxy.cert.pem
|
||||
```
|
||||
|
||||
## Verify the certificate
|
||||
|
||||
``` bash
|
||||
openssl x509 -text -noout \
|
||||
-in /app/pki/intermediate/certs/exegol.swgalaxy.cert.pem
|
||||
```
|
||||
|
||||
2
divers/oracle_resource_manager_01.txt
Normal file
2
divers/oracle_resource_manager_01.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
# CPU usage limit with resource manager in Oracle
|
||||
# https://smarttechways.com/2021/05/12/cpu-usage-limit-with-resource-manager-in-oracle/
|
||||
178
divers/patch_standby_first_01.txt
Normal file
178
divers/patch_standby_first_01.txt
Normal file
@@ -0,0 +1,178 @@
|
||||
select force_logging from v$database;
|
||||
|
||||
set lines 256 pages 999
|
||||
|
||||
col MEMBER for a60
|
||||
select * from v$logfile;
|
||||
|
||||
-- create standby redologs
|
||||
select 'ALTER DATABASE ADD STANDBY LOGFILE THREAD '||thread#||' size '||bytes||';' from v$log;
|
||||
select distinct 'ALTER DATABASE ADD STANDBY LOGFILE THREAD '||thread#||' size '||bytes||';' from v$log;
|
||||
|
||||
-- clear / drop standby redologs
|
||||
select 'ALTER DATABASE CLEAR LOGFILE GROUP '||GROUP#||';' from v$standby_log;
|
||||
select 'ALTER DATABASE DROP STANDBY LOGFILE GROUP '||GROUP#||';' from v$standby_log;
|
||||
|
||||
|
||||
|
||||
|
||||
*.audit_file_dest='/app/oracle/base/admin/ANDODRP/adump'
|
||||
*.audit_trail='OS'
|
||||
*.compatible='19.0.0.0'
|
||||
*.control_files='/data/ANDODRP/control01.ctl'
|
||||
*.db_block_size=8192
|
||||
*.db_create_file_dest='/data'
|
||||
*.db_create_online_log_dest_1='/data'
|
||||
*.db_name='ANDO'
|
||||
*.db_recovery_file_dest_size=10G
|
||||
*.db_recovery_file_dest='/reco'
|
||||
*.db_unique_name='ANDODRP'
|
||||
*.diagnostic_dest='/app/oracle/base/admin/ANDODRP'
|
||||
*.enable_goldengate_replication=TRUE
|
||||
*.enable_pluggable_database=FALSE
|
||||
*.instance_name='ANDODRP'
|
||||
*.log_archive_dest_1='location=USE_DB_RECOVERY_FILE_DEST'
|
||||
*.log_archive_format='%t_%s_%r.arc'
|
||||
*.open_cursors=300
|
||||
*.pga_aggregate_target=512M
|
||||
*.processes=350
|
||||
*.remote_login_passwordfile='exclusive'
|
||||
*.sga_max_size=3G
|
||||
*.sga_target=3G
|
||||
*.undo_tablespace='TS_UNDO'
|
||||
|
||||
|
||||
|
||||
create spfile='/app/oracle/base/admin/ANDODRP/spfile/spfileANDODRP.ora' from pfile='/mnt/yavin4/tmp/_oracle_/tmp/ANDO.txt';
|
||||
|
||||
|
||||
/mnt/yavin4/tmp/_oracle_/tmp/bakura/listener.ora
|
||||
|
||||
STATIC =
|
||||
(DESCRIPTION_LIST =
|
||||
(DESCRIPTION =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = bakura)(PORT = 1600))
|
||||
)
|
||||
)
|
||||
|
||||
SID_LIST_STATIC =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = ANDODRP_STATIC)
|
||||
(SID_NAME = ANDODRP)
|
||||
(ORACLE_HOME = /app/oracle/product/19)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
export TNS_ADMIN=/mnt/yavin4/tmp/_oracle_/tmp/bakura
|
||||
lsnrctl start STATIC
|
||||
lsnrctl status STATIC
|
||||
|
||||
|
||||
|
||||
|
||||
/mnt/yavin4/tmp/_oracle_/tmp/togoria/listener.ora
|
||||
|
||||
STATIC =
|
||||
(DESCRIPTION_LIST =
|
||||
(DESCRIPTION =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = togoria)(PORT = 1600))
|
||||
)
|
||||
)
|
||||
|
||||
SID_LIST_STATIC =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = ANDOPRD_STATIC)
|
||||
(SID_NAME = ANDOPRD)
|
||||
(ORACLE_HOME = /app/oracle/product/19)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
export TNS_ADMIN=/mnt/yavin4/tmp/_oracle_/tmp/togoria
|
||||
lsnrctl start STATIC
|
||||
lsnrctl status STATIC
|
||||
|
||||
|
||||
connect sys/"Secret00!"@//togoria:1600/ANDOPRD_STATIC as sysdba
|
||||
connect sys/"Secret00!"@//bakura:1600/ANDODRP_STATIC as sysdba
|
||||
|
||||
|
||||
rman target=sys/"Secret00!"@//togoria:1600/ANDOPRD_STATIC auxiliary=sys/"Secret00!"@//bakura:1600/ANDODRP_STATIC
|
||||
run {
|
||||
allocate channel pri1 device type DISK;
|
||||
allocate channel pri2 device type DISK;
|
||||
allocate channel pri3 device type DISK;
|
||||
allocate channel pri4 device type DISK;
|
||||
allocate auxiliary channel aux1 device type DISK;
|
||||
allocate auxiliary channel aux2 device type DISK;
|
||||
allocate auxiliary channel aux3 device type DISK;
|
||||
allocate auxiliary channel aux4 device type DISK;
|
||||
duplicate target database
|
||||
for standby
|
||||
dorecover
|
||||
from active database
|
||||
nofilenamecheck
|
||||
using compressed backupset section size 1G;
|
||||
}
|
||||
|
||||
|
||||
alter system set dg_broker_config_file1='/app/oracle/base/admin/ANDOPRD/divers/dr1ANDOPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='/app/oracle/base/admin/ANDOPRD/divers/dr2ANDOPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
alter system set dg_broker_config_file1='/app/oracle/base/admin/ANDODRP/divers/dr1ANDODRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='/app/oracle/base/admin/ANDODRP/divers/dr2ANDODRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
|
||||
dgmgrl
|
||||
connect sys/"Secret00!"@//togoria:1600/ANDOPRD_STATIC
|
||||
|
||||
create configuration ANDO as
|
||||
primary database is ANDOPRD
|
||||
connect identifier is "//togoria:1600/ANDOPRD_STATIC";
|
||||
|
||||
add database ANDODRP
|
||||
as connect identifier is "//bakura:1600/ANDODRP_STATIC"
|
||||
maintained as physical;
|
||||
|
||||
enable configuration;
|
||||
show configuration;
|
||||
|
||||
edit database 'andoprd' set property ArchiveLagTarget=0;
|
||||
edit database 'andoprd' set property LogArchiveMaxProcesses=2;
|
||||
edit database 'andoprd' set property LogArchiveMinSucceedDest=1;
|
||||
edit database 'andoprd' set property StandbyFileManagement='AUTO';
|
||||
|
||||
edit database 'andodrp' set property ArchiveLagTarget=0;
|
||||
edit database 'andodrp' set property LogArchiveMaxProcesses=2;
|
||||
edit database 'andodrp' set property LogArchiveMinSucceedDest=1;
|
||||
edit database 'andodrp' set property StandbyFileManagement='AUTO';
|
||||
|
||||
edit database 'andoprd' set property 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=togoria)(PORT=1600))(CONNECT_DATA=(SERVICE_NAME=ANDOPRD_STATIC)(SERVER=DEDICATED)))';
|
||||
edit database 'andodrp' set property 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=bakura)(PORT=1600))(CONNECT_DATA=(SERVICE_NAME=ANDODRP_STATIC)(SERVER=DEDICATED)))';
|
||||
|
||||
validate database 'andoprd'
|
||||
validate database 'andodrp'
|
||||
|
||||
switchover to 'andodrp'
|
||||
switchover to 'andoprd'
|
||||
switchover to 'andodrp'
|
||||
|
||||
convert database 'andodrp' to snapshot standby;
|
||||
convert database 'andodrp' to physical standby;
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
24
divers/purines.md
Normal file
24
divers/purines.md
Normal file
@@ -0,0 +1,24 @@
|
||||
| Poisson / Viande / Fruit de mer | Purines (mg/100 g) |
|
||||
|----------------------------------|--------------------|
|
||||
| Dinde hachée, crue | ~96 |
|
||||
| Cabillaud | ~98 |
|
||||
| Aiglefin | ~110 |
|
||||
| Colin | ~110 |
|
||||
| Merlu | ~110 |
|
||||
| Flétan | ~120 |
|
||||
| Noix de Saint-Jacques | ~135 |
|
||||
| Dorade | ~140 |
|
||||
| Bar | ~150 |
|
||||
| Poulet haché, cru | ~158.7 |
|
||||
| Saumon | ~170 |
|
||||
| Truite | ~170 |
|
||||
| Crevette | ~200 |
|
||||
| Porc | ~230 |
|
||||
| Bœuf | ~250 |
|
||||
| Thon | ~290 |
|
||||
| Sardine crue | 345 |
|
||||
| Hareng en conserve | 378 |
|
||||
| Foie de lotte cuit | 398.7 |
|
||||
| Anchois crus | 411 |
|
||||
| Crevette Sakura séchée | 748.9 |
|
||||
| Maquereau japonais | 1 175 |
|
||||
3
divers/random_string_bash.txt
Normal file
3
divers/random_string_bash.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
# generating random string in bash
|
||||
echo $RANDOM | md5sum | head -c 20; echo;
|
||||
cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20; echo;
|
||||
19
divers/rocky9_nmcli_example_01.txt
Normal file
19
divers/rocky9_nmcli_example_01.txt
Normal file
@@ -0,0 +1,19 @@
|
||||
# Rocky 9 network interface change IP address and host name example
|
||||
###################################################################
|
||||
|
||||
nmcli connection show
|
||||
nmcli connection show --active
|
||||
|
||||
nmcli connection modify enp1s0 ipv4.address 192.168.0.52/24
|
||||
nmcli connection modify enp1s0 ipv4.method manual ipv6.method ignore
|
||||
nmcli connection modify enp1s0 ipv4.gateway 192.168.0.1
|
||||
nmcli connection modify enp1s0 ipv4.dns 192.168.0.8
|
||||
nmcli connection modify enp1s0 ipv4.dns-search swgalaxy
|
||||
|
||||
nmcli connection modify enp2s0 ipv4.address 192.168.1.52/24 ipv4.method manual ipv6.method ignore
|
||||
|
||||
# list host interfaces
|
||||
hostname -I
|
||||
|
||||
# set host name
|
||||
hostnamectl hostname ithor.swgalaxy
|
||||
8
divers/screen_command.md
Normal file
8
divers/screen_command.md
Normal file
@@ -0,0 +1,8 @@
|
||||
## Screen configuration
|
||||
|
||||
Configuration file `~/.screenrc`:
|
||||
|
||||
termcapinfo xterm* ti@:te@
|
||||
caption always
|
||||
caption string "%{= bW}%3n %{y}%t %{-}%= %{m}%H%?%{-} -- %{c}%l%?%{-} -- %D %M %d %{y}%c"
|
||||
|
||||
34
divers/split_string_in_words_01.sql
Normal file
34
divers/split_string_in_words_01.sql
Normal file
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
vplesnlia: split input string into words
|
||||
*/
|
||||
|
||||
|
||||
DECLARE
|
||||
TYPE v_arr IS
|
||||
VARRAY(100) OF VARCHAR2(60);
|
||||
var v_arr;
|
||||
return_value VARCHAR2(60);
|
||||
BEGIN
|
||||
var := v_arr();
|
||||
FOR c1 IN (
|
||||
SELECT
|
||||
regexp_substr(
|
||||
'&&1', '[^ ]+', 1, level
|
||||
) AS string_parts
|
||||
FROM
|
||||
dual
|
||||
CONNECT BY
|
||||
regexp_substr(
|
||||
'&&1', '[^ ]+', 1, level
|
||||
) IS NOT NULL
|
||||
) LOOP
|
||||
var.extend;
|
||||
var(var.last) := c1.string_parts;
|
||||
END LOOP;
|
||||
|
||||
FOR i IN var.first..var.last LOOP
|
||||
return_value := var(i);
|
||||
dbms_output.put_line(return_value);
|
||||
END LOOP;
|
||||
|
||||
END;
|
||||
236
divers/sql_analytic_01.txt
Normal file
236
divers/sql_analytic_01.txt
Normal file
@@ -0,0 +1,236 @@
|
||||
https://livesql.oracle.com/apex/livesql/file/tutorial_GNRYA4548AQNXC0S04DXVEV08.html
|
||||
https://oracle-base.com/articles/misc/rank-dense-rank-first-last-analytic-functions#rank
|
||||
|
||||
drop table CARS purge;
|
||||
create table CARS (
|
||||
id INTEGER GENERATED ALWAYS AS IDENTITY
|
||||
,brand VARCHAR2(15) not null
|
||||
,model VARCHAR2(10) not null
|
||||
,year NUMBER(4) not null
|
||||
,color VARCHAR2(10) not null
|
||||
,category VARCHAR2(12) not null
|
||||
,price NUMBER not null
|
||||
,power NUMBER(4) not null
|
||||
,fuel VARCHAR2(8) not null
|
||||
)
|
||||
;
|
||||
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Audi','A4','2001','gray','city','5400','150','SP');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Audi','A6','2012','gray','limousine','12000','204','DIESEL');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('BMW','Serie 4','2020','white','sport','16000','240','SP');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('BMW','X6','2018','blue','SUV','15000','280','DIESEL');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Volkswagen','Polo','2014','gray','city','4800','90','DIESEL');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Renault','Arkana','2023','green','SUV','35000','220','ELECTRIC');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Porche','Cayenne','2021','black','SUV','41000','280','SP');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Tesla','Model 3','2023','black','city','30500','250','ELECTRIC');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Tesla','Model 3','2023','white','city','30500','250','ELECTRIC');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Tesla','Model 3','2022','black','city','24000','250','ELECTRIC');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Audi','A4','2022','red','city','26000','200','SP');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Audi','Q5','2021','gray','SUV','38000','260','SP');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('BMW','Serie 3','2022','white','city','46000','240','ELECTRIC');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('BMW','Serie 3','2023','white','city','44000','240','ELECTRIC');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('BMW','Serie 3','2021','white','city','42000','240','ELECTRIC');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Renault','Clio','2019','black','city','8900','110','SP');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Renault','Clio','2020','black','city','9600','110','SP');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Renault','Twingo','2019','red','city','7800','90','SP');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Renault','Twingo','2022','green','city','9200','90','SP');
|
||||
Insert into POC.CARS (BRAND,MODEL,YEAR,COLOR,CATEGORY,PRICE,POWER,FUEL) values ('Porche','911','2022','gray','sport','61000','310','SP');
|
||||
|
||||
commit;
|
||||
|
||||
|
||||
-- display cars and total cars count
|
||||
select
|
||||
c.*
|
||||
,count(*) over() as Total_count
|
||||
from
|
||||
CARS c
|
||||
;
|
||||
|
||||
-- display cars and the number of cars by brand
|
||||
select
|
||||
c.*
|
||||
,count(*) over(partition by (brand)) as Brand_count
|
||||
from
|
||||
CARS c
|
||||
;
|
||||
|
||||
|
||||
-- number of cars and sum of prices grouped by color
|
||||
select color, count(*), sum(price)
|
||||
from CARS
|
||||
group by color;
|
||||
|
||||
-- integrating last group by query as analytic
|
||||
-- adding that "inline" for each line
|
||||
select
|
||||
c.*
|
||||
,count(*) over(partition by (color)) as count_by_color
|
||||
,sum(price) over(partition by (color)) as SUM_price_by_color
|
||||
from
|
||||
CARS c
|
||||
;
|
||||
|
||||
|
||||
|
||||
-- average price by category
|
||||
select CATEGORY, avg(price)
|
||||
from CARS
|
||||
group by CATEGORY;
|
||||
|
||||
-- for each car, price as a percentage of the average price of its category
|
||||
select
|
||||
c.*
|
||||
,100*c.price/avg(c.price) over (partition by (category)) Price_by_avg_category_PERCENT
|
||||
from
|
||||
CARS c
|
||||
;
|
||||
|
||||
|
||||
select CATEGORY, avg(price)
|
||||
from CARS
|
||||
group by CATEGORY;
|
||||
|
||||
|
||||
-- order by in analytic: running total from FIRST key until CURRENT key
|
||||
select b.*,
|
||||
count(*) over (
|
||||
order by brick_id
|
||||
) running_total,
|
||||
sum ( weight ) over (
|
||||
order by brick_id
|
||||
) running_weight
|
||||
from bricks b;
|
||||
|
||||
|
||||
BRICK_ID COLOUR SHAPE WEIGHT RUNNING_TOTAL RUNNING_WEIGHT
|
||||
---------- ---------- ---------- ---------- ------------- --------------
|
||||
1 blue cube 1 1 1
|
||||
2 blue pyramid 2 2 3
|
||||
3 red cube 1 3 4
|
||||
4 red cube 2 4 6
|
||||
5 red pyramid 3 5 9
|
||||
6 green pyramid 1 6 10
|
||||
|
||||
6 rows selected.
|
||||
|
||||
|
||||
select
|
||||
c.*
|
||||
,sum(c.price) over (order by c.id)
|
||||
from
|
||||
cars c;
|
||||
|
||||
|
||||
|
||||
ID BRAND MODEL YEAR COLOR CATEGORY PRICE POWER FUEL SUM(C.PRICE)OVER(ORDERBYC.ID)
|
||||
---------- --------------- ---------- ---------- ---------- ------------ ---------- ---------- -------- -----------------------------
|
||||
1 Audi A4 2001 gray city 5400 150 SP 5400
|
||||
2 Audi A6 2012 gray limousine 12000 204 DIESEL 17400
|
||||
3 BMW Serie 4 2020 white sport 16000 240 SP 33400
|
||||
4 BMW X6 2018 blue SUV 15000 280 DIESEL 48400
|
||||
5 Volkswagen Polo 2014 gray city 4800 90 DIESEL 53200
|
||||
6 Renault Arkana 2023 green SUV 35000 220 ELECTRIC 88200
|
||||
7 Porche Cayenne 2021 black SUV 41000 280 SP 129200
|
||||
8 Tesla Model 3 2023 black city 30500 250 ELECTRIC 159700
|
||||
9 Tesla Model 3 2023 white city 30500 250 ELECTRIC 190200
|
||||
10 Tesla Model 3 2022 black city 24000 250 ELECTRIC 214200
|
||||
11 Audi A4 2022 red city 26000 200 SP 240200
|
||||
12 Audi Q5 2021 gray SUV 38000 260 SP 278200
|
||||
13 BMW Serie 3 2022 white city 46000 240 ELECTRIC 324200
|
||||
14 BMW Serie 3 2023 white city 44000 240 ELECTRIC 368200
|
||||
15 BMW Serie 3 2021 white city 42000 240 ELECTRIC 410200
|
||||
16 Renault Clio 2019 black city 8900 110 SP 419100
|
||||
17 Renault Clio 2020 black city 9600 110 SP 428700
|
||||
18 Renault Twingo 2019 red city 7800 90 SP 436500
|
||||
19 Renault Twingo 2022 green city 9200 90 SP 445700
|
||||
20 Porche 911 2022 gray sport 61000 310 SP 506700
|
||||
|
||||
20 rows selected.
|
||||
|
||||
|
||||
-- adding PARTITION by EXPR will "group by EXPR" and reset FIRST key for each group
|
||||
select
|
||||
c.*
|
||||
,sum(c.price) over (partition by brand order by c.id)
|
||||
from
|
||||
cars c;
|
||||
|
||||
|
||||
ID BRAND MODEL YEAR COLOR CATEGORY PRICE POWER FUEL SUM(C.PRICE)OVER(PARTITIONBYBRANDORDERBYC.ID)
|
||||
---------- --------------- ---------- ---------- ---------- ------------ ---------- ---------- -------- ---------------------------------------------
|
||||
1 Audi A4 2001 gray city 5400 150 SP 5400
|
||||
2 Audi A6 2012 gray limousine 12000 204 DIESEL 17400
|
||||
11 Audi A4 2022 red city 26000 200 SP 43400
|
||||
12 Audi Q5 2021 gray SUV 38000 260 SP 81400
|
||||
3 BMW Serie 4 2020 white sport 16000 240 SP 16000
|
||||
4 BMW X6 2018 blue SUV 15000 280 DIESEL 31000
|
||||
13 BMW Serie 3 2022 white city 46000 240 ELECTRIC 77000
|
||||
14 BMW Serie 3 2023 white city 44000 240 ELECTRIC 121000
|
||||
15 BMW Serie 3 2021 white city 42000 240 ELECTRIC 163000
|
||||
7 Porche Cayenne 2021 black SUV 41000 280 SP 41000
|
||||
20 Porche 911 2022 gray sport 61000 310 SP 102000
|
||||
6 Renault Arkana 2023 green SUV 35000 220 ELECTRIC 35000
|
||||
16 Renault Clio 2019 black city 8900 110 SP 43900
|
||||
17 Renault Clio 2020 black city 9600 110 SP 53500
|
||||
18 Renault Twingo 2019 red city 7800 90 SP 61300
|
||||
19 Renault Twingo 2022 green city 9200 90 SP 70500
|
||||
8 Tesla Model 3 2023 black city 30500 250 ELECTRIC 30500
|
||||
9 Tesla Model 3 2023 white city 30500 250 ELECTRIC 61000
|
||||
10 Tesla Model 3 2022 black city 24000 250 ELECTRIC 85000
|
||||
5 Volkswagen Polo 2014 gray city 4800 90 DIESEL 4800
|
||||
|
||||
20 rows selected.
|
||||
|
||||
|
||||
|
||||
-- when the keys of ORDER BY are not distinct, over (order by KEY) the analytic function will not change for lines having the same KEY value
|
||||
-- to force the compute from previous line to current add : rows between unbounded preceding and current row
|
||||
|
||||
|
||||
|
||||
select b.*,
|
||||
count(*) over (
|
||||
order by weight
|
||||
) running_total,
|
||||
sum ( weight ) over (
|
||||
order by weight
|
||||
) running_weight
|
||||
from bricks b
|
||||
order by weight;
|
||||
|
||||
|
||||
BRICK_ID COLOUR SHAPE WEIGHT RUNNING_TOTAL RUNNING_WEIGHT
|
||||
---------- ---------- ---------- ---------- ------------- --------------
|
||||
1 blue cube 1 3 3
|
||||
3 red cube 1 3 3
|
||||
6 green pyramid 1 3 3
|
||||
4 red cube 2 5 7
|
||||
2 blue pyramid 2 5 7
|
||||
5 red pyramid 3 6 10
|
||||
|
||||
|
||||
select b.*,
|
||||
count(*) over (
|
||||
order by weight
|
||||
rows between unbounded preceding and current row
|
||||
) running_total,
|
||||
sum ( weight ) over (
|
||||
order by weight
|
||||
rows between unbounded preceding and current row
|
||||
) running_weight
|
||||
from bricks b
|
||||
order by weight;
|
||||
|
||||
|
||||
|
||||
BRICK_ID COLOUR SHAPE WEIGHT RUNNING_TOTAL RUNNING_WEIGHT
|
||||
---------- ---------- ---------- ---------- ------------- --------------
|
||||
1 blue cube 1 1 1
|
||||
3 red cube 1 2 2
|
||||
6 green pyramid 1 3 3
|
||||
4 red cube 2 4 5
|
||||
2 blue pyramid 2 5 7
|
||||
5 red pyramid 3 6 10
|
||||
|
||||
6 rows selected.
|
||||
18
divers/swingbench_01.md
Normal file
18
divers/swingbench_01.md
Normal file
@@ -0,0 +1,18 @@
|
||||
Setup (schema creation).
|
||||
This will create SOE schema with *secret* password in the PDB YODA where admin user is sysdba.
|
||||
|
||||
./oewizard -v -cl -create \
|
||||
-cs wayland/YODA -u soe -p secret \
|
||||
-scale 1 -tc 2 -dba "admin as sysdba" -dbap "Secret00!" \
|
||||
-ts ts_swingbench
|
||||
|
||||
Check:
|
||||
|
||||
./sbutil -soe -cs wayland/YODA -soe -u soe -p secret -val
|
||||
|
||||
Run benchmark:
|
||||
|
||||
./charbench -c ../configs/SOE_Server_Side_V2.xml \
|
||||
-u soe -p secret -uc 5 -cs wayland/YODA \
|
||||
-min 0 -max 10 -intermin 200 -intermax 500 -mt 5000 -mr -v users,tpm,tps,errs,vresp
|
||||
|
||||
21
divers/tanel_update.txt
Normal file
21
divers/tanel_update.txt
Normal file
@@ -0,0 +1,21 @@
|
||||
delete mode 100644 tpt/ash/ash_wait_chains2.sql
|
||||
create mode 100644 tpt/ash/cashtop.sql
|
||||
delete mode 100644 tpt/ash/dash_wait_chains2.sql
|
||||
create mode 100644 tpt/ash/dashtopsum.sql
|
||||
create mode 100644 tpt/ash/dashtopsum_pga.sql
|
||||
delete mode 100644 tpt/ash/example_ash_report.html
|
||||
create mode 100644 tpt/ash/sqlexec_duration_buckets.sql
|
||||
create mode 100644 tpt/awr/awr_sqlid_binds.sql
|
||||
create mode 100644 tpt/awr/perfhub.html
|
||||
create mode 100644 tpt/create_sql_baseline_awr.sql
|
||||
create mode 100644 tpt/descpartxx.sql
|
||||
create mode 100644 tpt/descxx11.sql
|
||||
create mode 100644 tpt/lpstat.sql
|
||||
create mode 100644 tpt/netstat.sql
|
||||
create mode 100644 tpt/netstat2.sql
|
||||
create mode 100644 tpt/npstat.sql
|
||||
create mode 100644 tpt/oerrh.sql
|
||||
create mode 100644 tpt/oerrign.sql
|
||||
create mode 100644 tpt/setup/grant_snapper_privs.sql
|
||||
create mode 100644 tpt/setup/logon_trigger_ospid.sql
|
||||
create mode 100644 tpt/tabhisthybrid.sql
|
||||
231
divers/timescaledb_01.txt
Normal file
231
divers/timescaledb_01.txt
Normal file
@@ -0,0 +1,231 @@
|
||||
CREATE TABLE t (
|
||||
id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
|
||||
i INTEGER,
|
||||
c VARCHAR(30),
|
||||
ts TIMESTAMP
|
||||
);
|
||||
|
||||
INSERT INTO t (i, c, ts)
|
||||
SELECT
|
||||
(random() * 9999 + 1)::int AS i,
|
||||
md5(random()::text)::varchar(30) AS c,
|
||||
(
|
||||
timestamp '2000-01-01'
|
||||
+ random() * (timestamp '2025-12-31' - timestamp '2000-01-01')
|
||||
) AS ts
|
||||
FROM generate_series(1, 200000000);
|
||||
|
||||
|
||||
-- export standard table to CSV
|
||||
COPY t
|
||||
TO '/mnt/unprotected/tmp/postgres/t.csv'
|
||||
DELIMITER ','
|
||||
CSV HEADER;
|
||||
|
||||
-- import standard table from CSV
|
||||
|
||||
CREATE TABLE t (
|
||||
id INTEGER,
|
||||
i INTEGER,
|
||||
c TEXT,
|
||||
ts TIMESTAMPTZ
|
||||
);
|
||||
|
||||
COPY t
|
||||
FROM '/mnt/unprotected/tmp/postgres/t.csv'
|
||||
DELIMITER ','
|
||||
CSV HEADER;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS T_TS ON T (TS);
|
||||
|
||||
|
||||
------------
|
||||
-- Oracle --
|
||||
------------
|
||||
|
||||
CREATE TABLE t (
|
||||
id INTEGER,
|
||||
i INTEGER,
|
||||
c VARCHAR2(30),
|
||||
ts TIMESTAMP
|
||||
);
|
||||
|
||||
|
||||
|
||||
-- file t.ctl
|
||||
|
||||
LOAD DATA
|
||||
INFILE 't.csv'
|
||||
INTO TABLE t
|
||||
APPEND
|
||||
FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
|
||||
TRAILING NULLCOLS
|
||||
(
|
||||
id INTEGER EXTERNAL,
|
||||
i INTEGER EXTERNAL,
|
||||
c CHAR(30),
|
||||
ts TIMESTAMP "YYYY-MM-DD HH24:MI:SS.FF"
|
||||
)
|
||||
|
||||
sqlldr "'/ as sysdba'" \
|
||||
control=t.ctl \
|
||||
log=t.log \
|
||||
bad=t.bad \
|
||||
rows=50000
|
||||
|
||||
|
||||
------------------
|
||||
-- TimescaleDB --
|
||||
------------------
|
||||
|
||||
Install & config from sources:
|
||||
https://www.tigerdata.com/docs/self-hosted/latest/install/installation-source
|
||||
|
||||
CREATE TABLE ht (
|
||||
id INTEGER,
|
||||
i INTEGER,
|
||||
c TEXT,
|
||||
ts TIMESTAMPTZ
|
||||
);
|
||||
|
||||
SELECT create_hypertable(
|
||||
'ht', -- table name
|
||||
'ts', -- time column
|
||||
chunk_time_interval => INTERVAL '1 month'
|
||||
);
|
||||
|
||||
SELECT add_retention_policy(
|
||||
'ht',
|
||||
INTERVAL '25 years'
|
||||
);
|
||||
|
||||
SELECT * FROM timescaledb_information.jobs
|
||||
WHERE proc_name = 'policy_retention';
|
||||
|
||||
SELECT alter_job(
|
||||
job_id => <your_job_id>,
|
||||
schedule_interval => INTERVAL '6 hours'
|
||||
);
|
||||
|
||||
timescaledb-parallel-copy --connection "postgres://postgres@localhost/db01" --table ht --file '/mnt/unprotected/tmp/postgres/t.csv' \
|
||||
--workers 16 --reporting-period 30s -skip-header
|
||||
|
||||
|
||||
SELECT show_chunks('t');
|
||||
|
||||
-----------
|
||||
-- Bench --
|
||||
-----------
|
||||
|
||||
-- q1
|
||||
select * from t where ts between timestamp'2015-04-01:09:00:00' and timestamp'2015-04-01:09:00:20';
|
||||
|
||||
-- q2
|
||||
select count(*) from t;
|
||||
|
||||
|
||||
|
||||
|
||||
Classic PostgreSQL
|
||||
|
||||
Table load: 5 min
|
||||
|
||||
q1: 52 sec
|
||||
q2: 45 sec
|
||||
|
||||
|
||||
|
||||
|
||||
TimescaleDB
|
||||
|
||||
Table load: 5 min
|
||||
|
||||
|
||||
db01=# SELECT pg_size_pretty(pg_total_relation_size('public.t'));
|
||||
pg_size_pretty
|
||||
----------------
|
||||
18 GB
|
||||
(1 row)
|
||||
|
||||
db01=# SELECT pg_size_pretty(hypertable_size('public.ht'));
|
||||
pg_size_pretty
|
||||
----------------
|
||||
19 GB
|
||||
(1 row)
|
||||
|
||||
|
||||
ALTER TABLE ht
|
||||
SET (
|
||||
timescaledb.compress
|
||||
);
|
||||
|
||||
|
||||
SELECT add_compression_policy(
|
||||
'ht',
|
||||
INTERVAL '2 years'
|
||||
);
|
||||
|
||||
SELECT job_id
|
||||
FROM timescaledb_information.jobs
|
||||
WHERE proc_name = 'policy_compression'
|
||||
AND hypertable_name = 'ht';
|
||||
|
||||
CALL run_job(1002);
|
||||
|
||||
|
||||
SELECT
|
||||
chunk_schema || '.' || chunk_name AS chunk,
|
||||
is_compressed,
|
||||
range_start,
|
||||
range_end
|
||||
FROM timescaledb_information.chunks
|
||||
WHERE hypertable_name = 'ht'
|
||||
ORDER BY range_start;
|
||||
|
||||
|
||||
|
||||
|
||||
-----------------------------------------
|
||||
|
||||
CREATE MATERIALIZED VIEW ht_hourly_avg
|
||||
WITH (timescaledb.continuous) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', ts) AS bucket,
|
||||
AVG(i) AS avg_i
|
||||
FROM ht
|
||||
GROUP BY bucket;
|
||||
|
||||
SELECT add_continuous_aggregate_policy('ht_hourly_avg',
|
||||
start_offset => INTERVAL '2 days',
|
||||
end_offset => INTERVAL '0 hours',
|
||||
schedule_interval => INTERVAL '5 minutes'
|
||||
);
|
||||
|
||||
SELECT add_continuous_aggregate_policy('ht_hourly_avg',
|
||||
start_offset => INTERVAL '7 days',
|
||||
end_offset => INTERVAL '0 hours',
|
||||
schedule_interval => INTERVAL '30 minutes'
|
||||
);
|
||||
|
||||
|
||||
|
||||
SELECT *
|
||||
FROM ht_hourly_avg
|
||||
WHERE bucket >= now() - INTERVAL '7 days'
|
||||
ORDER BY bucket;
|
||||
|
||||
|
||||
|
||||
SELECT job_id, proc_name, config
|
||||
FROM timescaledb_information.jobs;
|
||||
|
||||
|
||||
SELECT pid, query, state, backend_type
|
||||
FROM pg_stat_activity
|
||||
WHERE query LIKE '%run_job%'
|
||||
AND query LIKE '%' || <job_id> || '%';
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
41
divers/tiny_root_CA_01.md
Normal file
41
divers/tiny_root_CA_01.md
Normal file
@@ -0,0 +1,41 @@
|
||||
> Based on article https://www.baeldung.com/openssl-self-signed-cert
|
||||
|
||||
## Build a home made root CA
|
||||
|
||||
mkdir -p /app/CA
|
||||
cd /app/CA
|
||||
|
||||
Create rootCA private key:
|
||||
|
||||
openssl genrsa -des3 -out rootCA.key 4096
|
||||
|
||||
Create rootCA certificate:
|
||||
|
||||
openssl req -x509 -new -nodes -key rootCA.key -sha256 -days 7300 -out rootCA.pem
|
||||
|
||||
|
||||
## Generate client root CA signed certificate for a client
|
||||
|
||||
Client private key:
|
||||
|
||||
openssl genrsa -out raxus.swgalaxy.key 2048
|
||||
|
||||
Client certificate signature request:
|
||||
|
||||
openssl req -new -key raxus.swgalaxy.key -out raxus.swgalaxy.csr
|
||||
|
||||
Root CA create a signed certificate using the certificate signature request:
|
||||
|
||||
openssl x509 -req -CA rootCA.pem -CAkey rootCA.key -in raxus.swgalaxy.csr -out raxus.swgalaxy.crt -days 365 -CAcreateserial
|
||||
|
||||
Optionally create the full chain:
|
||||
|
||||
cat raxus.swgalaxy.crt rootCA.pem > raxus.swgalaxy.fullchain.crt
|
||||
|
||||
Optionally create an export to be imported into a Oracle wallet:
|
||||
|
||||
openssl pkcs12 -export \
|
||||
-in raxus.swgalaxy.crt \
|
||||
-inkey raxus.swgalaxy.key \
|
||||
-certfile rootCA.pem \
|
||||
-out raxus.swgalaxy.p12
|
||||
9
divers/use_cp_to_copy_hidden_files_01.md
Normal file
9
divers/use_cp_to_copy_hidden_files_01.md
Normal file
@@ -0,0 +1,9 @@
|
||||
== Use cp to copy hidden files
|
||||
|
||||
cp -r from/.[^.]* to/
|
||||
|
||||
Example:
|
||||
|
||||
cd /root
|
||||
cp -r ./.[^.]* /mnt/unprotected/tmp/reinstall_coruscant/dom0/slash_root/
|
||||
|
||||
8
divers/windows_11_auto_login_01
Normal file
8
divers/windows_11_auto_login_01
Normal file
@@ -0,0 +1,8 @@
|
||||
# create local admin user
|
||||
net user vplesnila secret /add
|
||||
net localgroup administrators vplesnila /add
|
||||
|
||||
# setup autologin
|
||||
REG ADD "HKLM\Software\Microsoft\Windows NT\CurrentVersion\Winlogon" /v AutoAdminLogon /t REG_SZ /d 1 /f
|
||||
REG ADD "HKLM\Software\Microsoft\Windows NT\CurrentVersion\Winlogon" /v DefaultUserName /t REG_SZ /d vplesnila /f
|
||||
REG ADD "HKLM\Software\Microsoft\Windows NT\CurrentVersion\Winlogon" /v DefaultPassword /t REG_SZ /d secret /f
|
||||
3
divers/windows_11_create_local_admin_01.txt
Normal file
3
divers/windows_11_create_local_admin_01.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
net user USER-NAME PASSWORD /add
|
||||
net localgroup administrators USER-ACCOUNT /add
|
||||
|
||||
555
divers/xtts_non-cdb_to_cdb_01.md
Normal file
555
divers/xtts_non-cdb_to_cdb_01.md
Normal file
@@ -0,0 +1,555 @@
|
||||
## Context
|
||||
|
||||
- Source: non-CDB = GREEDO@rodia-scan
|
||||
- Target: PDB = REEK, CDB=AERONPRD@ylesia-scan
|
||||
|
||||
## Setup
|
||||
|
||||
Create tablespaces and users:
|
||||
|
||||
```
|
||||
create tablespace TS1 datafile size 16M autoextend on next 16M;
|
||||
create tablespace TS2 datafile size 16M autoextend on next 16M;
|
||||
create tablespace TS3 datafile size 16M autoextend on next 16M;
|
||||
|
||||
alter tablespace TS1 add datafile size 16M autoextend on next 16M;
|
||||
alter tablespace TS1 add datafile size 16M autoextend on next 16M;
|
||||
alter tablespace TS2 add datafile size 16M autoextend on next 16M;
|
||||
alter tablespace TS3 add datafile size 16M autoextend on next 16M;
|
||||
alter tablespace TS3 add datafile size 16M autoextend on next 16M;
|
||||
alter tablespace TS3 add datafile size 16M autoextend on next 16M;
|
||||
|
||||
create user U1 identified by secret;
|
||||
grant connect, resource, create view,create job to U1;
|
||||
alter user U1 quota unlimited on TS1;
|
||||
alter user U1 quota unlimited on TS2;
|
||||
alter user U1 quota unlimited on TS3;
|
||||
|
||||
create user U2 identified by secret;
|
||||
grant connect, resource, create view,create job to U2;
|
||||
alter user U2 quota unlimited on TS1;
|
||||
alter user U2 quota unlimited on TS2;
|
||||
alter user U2 quota unlimited on TS3;
|
||||
```
|
||||
|
||||
For each user, create objects:
|
||||
|
||||
connect U1/secret
|
||||
-- create objects
|
||||
connect U2/secret
|
||||
-- create objects
|
||||
|
||||
Create objects script:
|
||||
|
||||
```
|
||||
-- TABLE 1 dans TS1
|
||||
CREATE TABLE table1_ts1 (
|
||||
id NUMBER PRIMARY KEY,
|
||||
data VARCHAR2(100),
|
||||
created_at DATE DEFAULT SYSDATE
|
||||
) TABLESPACE TS1;
|
||||
|
||||
CREATE SEQUENCE table1_seq
|
||||
START WITH 1
|
||||
INCREMENT BY 1
|
||||
NOCACHE
|
||||
NOCYCLE;
|
||||
|
||||
CREATE OR REPLACE TRIGGER trg_table1_id
|
||||
BEFORE INSERT ON table1_ts1
|
||||
FOR EACH ROW
|
||||
BEGIN
|
||||
IF :NEW.id IS NULL THEN
|
||||
SELECT table1_seq.NEXTVAL INTO :NEW.id FROM dual;
|
||||
END IF;
|
||||
END;
|
||||
/
|
||||
|
||||
-- TABLE 2 dans TS2
|
||||
CREATE TABLE table2_ts2 (
|
||||
id NUMBER PRIMARY KEY,
|
||||
data VARCHAR2(100),
|
||||
updated_at DATE
|
||||
) TABLESPACE TS2;
|
||||
|
||||
CREATE SEQUENCE table2_seq
|
||||
START WITH 1
|
||||
INCREMENT BY 1
|
||||
NOCACHE
|
||||
NOCYCLE;
|
||||
|
||||
CREATE OR REPLACE TRIGGER trg_table2_id
|
||||
BEFORE INSERT ON table2_ts2
|
||||
FOR EACH ROW
|
||||
BEGIN
|
||||
IF :NEW.id IS NULL THEN
|
||||
SELECT table2_seq.NEXTVAL INTO :NEW.id FROM dual;
|
||||
END IF;
|
||||
END;
|
||||
/
|
||||
|
||||
-- TABLE 3 dans TS3
|
||||
CREATE TABLE table3_ts3 (
|
||||
id NUMBER PRIMARY KEY,
|
||||
info VARCHAR2(100),
|
||||
status VARCHAR2(20)
|
||||
) TABLESPACE TS3;
|
||||
|
||||
CREATE SEQUENCE table3_seq
|
||||
START WITH 1
|
||||
INCREMENT BY 1
|
||||
NOCACHE
|
||||
NOCYCLE;
|
||||
|
||||
CREATE OR REPLACE TRIGGER trg_table3_id
|
||||
BEFORE INSERT ON table3_ts3
|
||||
FOR EACH ROW
|
||||
BEGIN
|
||||
IF :NEW.id IS NULL THEN
|
||||
SELECT table3_seq.NEXTVAL INTO :NEW.id FROM dual;
|
||||
END IF;
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
CREATE OR REPLACE VIEW combined_view AS
|
||||
SELECT id, data, created_at, NULL AS updated_at, NULL AS status FROM table1_ts1
|
||||
UNION ALL
|
||||
SELECT id, data, updated_at, NULL AS created_at, NULL AS status FROM table2_ts2
|
||||
UNION ALL
|
||||
SELECT id, info AS data, NULL, NULL, status FROM table3_ts3;
|
||||
|
||||
|
||||
CREATE OR REPLACE PACKAGE data_ops AS
|
||||
PROCEDURE insert_random_data;
|
||||
PROCEDURE update_random_data;
|
||||
PROCEDURE delete_random_data;
|
||||
END data_ops;
|
||||
/
|
||||
|
||||
CREATE OR REPLACE PACKAGE BODY data_ops AS
|
||||
PROCEDURE insert_random_data IS
|
||||
BEGIN
|
||||
FOR i IN 1..10 LOOP
|
||||
INSERT INTO table1_ts1 (data)
|
||||
VALUES (DBMS_RANDOM.STRING('A', 10));
|
||||
END LOOP;
|
||||
|
||||
FOR i IN 1..3 LOOP
|
||||
INSERT INTO table3_ts3 (info, status)
|
||||
VALUES (DBMS_RANDOM.STRING('A', 10), 'NEW');
|
||||
END LOOP;
|
||||
END;
|
||||
|
||||
PROCEDURE update_random_data IS
|
||||
BEGIN
|
||||
FOR i IN 1..7 LOOP
|
||||
INSERT INTO table2_ts2 (data)
|
||||
VALUES (DBMS_RANDOM.STRING('A', 10));
|
||||
END LOOP;
|
||||
FOR rec IN (
|
||||
SELECT id FROM (
|
||||
SELECT id FROM table2_ts2 ORDER BY DBMS_RANDOM.VALUE
|
||||
) WHERE ROWNUM <= 5
|
||||
) LOOP
|
||||
UPDATE table2_ts2
|
||||
SET data = DBMS_RANDOM.STRING('A', 10), updated_at = SYSDATE
|
||||
WHERE id = rec.id;
|
||||
END LOOP;
|
||||
END;
|
||||
|
||||
PROCEDURE delete_random_data IS
|
||||
BEGIN
|
||||
FOR rec IN (
|
||||
SELECT id FROM (
|
||||
SELECT id FROM table3_ts3 ORDER BY DBMS_RANDOM.VALUE
|
||||
) WHERE ROWNUM <= 2
|
||||
) LOOP
|
||||
DELETE FROM table3_ts3 WHERE id = rec.id;
|
||||
END LOOP;
|
||||
END;
|
||||
END data_ops;
|
||||
/
|
||||
```
|
||||
|
||||
Create job to run every 1 minute:
|
||||
|
||||
```
|
||||
BEGIN
|
||||
DBMS_SCHEDULER.CREATE_JOB (
|
||||
job_name => 'random_ops_job',
|
||||
job_type => 'PLSQL_BLOCK',
|
||||
job_action => '
|
||||
BEGIN
|
||||
data_ops.insert_random_data;
|
||||
data_ops.update_random_data;
|
||||
data_ops.delete_random_data;
|
||||
END;',
|
||||
start_date => SYSTIMESTAMP,
|
||||
repeat_interval => 'FREQ=MINUTELY; INTERVAL=1',
|
||||
enabled => TRUE,
|
||||
comments => 'Job to insert, update and delete random data every minute.'
|
||||
);
|
||||
END;
|
||||
/
|
||||
```
|
||||
|
||||
To restart the job:
|
||||
|
||||
```
|
||||
--Restart the job
|
||||
BEGIN
|
||||
DBMS_SCHEDULER.enable('random_ops_job');
|
||||
END;
|
||||
/
|
||||
```
|
||||
|
||||
Count the lines in tables:
|
||||
|
||||
```
|
||||
select
|
||||
'u1.table1_ts1:'||count(*) from u1.table1_ts1
|
||||
union select
|
||||
'u1.table2_ts2:'||count(*) from u1.table2_ts2
|
||||
union select
|
||||
'u1.table3_ts3:'||count(*) from u1.table3_ts3
|
||||
union select
|
||||
'u2.table1_ts1:'||count(*) from u2.table1_ts1
|
||||
union select
|
||||
'u2.table2_ts2:'||count(*) from u2.table2_ts2
|
||||
union select
|
||||
'u2.table3_ts3:'||count(*) from u2.table3_ts3
|
||||
order by 1 asc
|
||||
/
|
||||
```
|
||||
|
||||
To ensure the automatic opening of PDB, create a service to start automatically in the PDB:
|
||||
|
||||
srvctl add service -s adm_reek -db AERONPRD -preferred AERONPRD1,AERONPRD2,AERONPRD3 -pdb REEK -role PRIMARY
|
||||
srvctl start service -s adm_reek -db AERONPRD
|
||||
|
||||
|
||||
## XTTS
|
||||
|
||||
> Note MOS: V4 Reduce Transportable Tablespace Downtime using Cross Platform Incremental Backup (Doc ID 2471245.1)
|
||||
|
||||
### Initial setup
|
||||
|
||||
Identify tablespaces to transport, probably all non "administrative" tablespaces:
|
||||
|
||||
```
|
||||
select
|
||||
listagg(tablespace_name, ',')
|
||||
within group
|
||||
(order by tablespace_name) as non_sys_ts
|
||||
from
|
||||
dba_tablespaces
|
||||
where
|
||||
contents not in ('UNDO','TEMPORARY') and
|
||||
tablespace_name not in ('SYSTEM','SYSAUX');
|
||||
```
|
||||
|
||||
For source and target servers, define folders to be used for scripts, backupset, datapump etc.
|
||||
In our case, that will be a shared NFS folder `/mnt/unprotected/tmp/oracle/xtts`
|
||||
|
||||
> The size of the folder should be greater than the size of the full database.
|
||||
|
||||
Unzip xtts scripts:
|
||||
|
||||
cd /mnt/unprotected/tmp/oracle/xtts
|
||||
unzip /mnt/yavin4/kit/Oracle/XTTS/rman_xttconvert_VER4.3.zip
|
||||
|
||||
Configure xtt.properties file:
|
||||
|
||||
```
|
||||
tablespaces=TS1,TS2,TS3,USERS
|
||||
src_scratch_location=/mnt/unprotected/tmp/oracle/xtts/scratch
|
||||
dest_datafile_location=+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/xtts/
|
||||
dest_scratch_location=/mnt/unprotected/tmp/oracle/xtts/scratch
|
||||
asm_home=/app/oracle/grid/product/19
|
||||
asm_sid=+ASM1
|
||||
destconnstr=sys/"Secret00!"@ylesia-scan/adm_reek
|
||||
usermantransport=1
|
||||
```
|
||||
|
||||
On target server, create ASM directory where the datafile will be restored:
|
||||
|
||||
mkdir +DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/xtts
|
||||
|
||||
On **both source and target** servers, set `TMPDIR` environment variable to the path of xtts scripts:
|
||||
|
||||
export TMPDIR=/mnt/unprotected/tmp/oracle/xtts
|
||||
|
||||
### Prepare Phase
|
||||
|
||||
This step corresponds to initial full backup/restore of source database on target system.
|
||||
|
||||
Initial backup on source server:
|
||||
|
||||
```
|
||||
export TMPDIR=/mnt/unprotected/tmp/oracle/xtts
|
||||
cd $TMPDIR
|
||||
$ORACLE_HOME/perl/bin/perl xttdriver.pl --backup --debug 3
|
||||
```
|
||||
|
||||
Initial restore on target server:
|
||||
|
||||
```
|
||||
export TMPDIR=/mnt/unprotected/tmp/oracle/xtts
|
||||
cd $TMPDIR
|
||||
$ORACLE_HOME/perl/bin/perl xttdriver.pl --restore --debug 3
|
||||
```
|
||||
|
||||
> `debug` argument is optional
|
||||
|
||||
### Roll Forward Phase
|
||||
|
||||
As long as necessary we can do incremental backup/restore operations.
|
||||
|
||||
> New datafiles added to the source database are automatically handled by this step.
|
||||
|
||||
The commands are exactly the same (with or without debug mode).
|
||||
|
||||
For backup:
|
||||
|
||||
```
|
||||
export TMPDIR=/mnt/unprotected/tmp/oracle/xtts
|
||||
cd $TMPDIR
|
||||
$ORACLE_HOME/perl/bin/perl xttdriver.pl --backup
|
||||
```
|
||||
|
||||
For restore:
|
||||
|
||||
```
|
||||
export TMPDIR=/mnt/unprotected/tmp/oracle/xtts
|
||||
cd $TMPDIR
|
||||
$ORACLE_HOME/perl/bin/perl xttdriver.pl --restore
|
||||
```
|
||||
|
||||
> Running successive backup or successive restore operations does not pose a problem.
|
||||
|
||||
### Final Incremental Backup
|
||||
|
||||
On **source** database, put tablespaces in **read-only** mode:
|
||||
|
||||
```
|
||||
select
|
||||
'alter tablespace '||tablespace_name||' read only;' as COMMAND
|
||||
from
|
||||
dba_tablespaces
|
||||
where
|
||||
contents not in ('UNDO','TEMPORARY') and
|
||||
tablespace_name not in ('SYSTEM','SYSAUX');
|
||||
```
|
||||
|
||||
Check:
|
||||
|
||||
```
|
||||
select distinct status
|
||||
from
|
||||
dba_tablespaces
|
||||
where
|
||||
contents not in ('UNDO','TEMPORARY') and
|
||||
tablespace_name not in ('SYSTEM','SYSAUX');
|
||||
```
|
||||
|
||||
Take final incremental backup:
|
||||
|
||||
```
|
||||
export TMPDIR=/mnt/unprotected/tmp/oracle/xtts
|
||||
cd $TMPDIR
|
||||
$ORACLE_HOME/perl/bin/perl xttdriver.pl --backup
|
||||
```
|
||||
|
||||
Restore the final incremental backup:
|
||||
|
||||
```
|
||||
export TMPDIR=/mnt/unprotected/tmp/oracle/xtts
|
||||
cd $TMPDIR
|
||||
$ORACLE_HOME/perl/bin/perl xttdriver.pl --restore
|
||||
```
|
||||
|
||||
### Metadata export
|
||||
|
||||
Create DATAPUMP directory on **both** source and destination databases.
|
||||
On source (non-CDB):
|
||||
|
||||
SQL> create or replace directory XTTS as '/mnt/unprotected/tmp/oracle/xtts';
|
||||
|
||||
On destination (PDB):
|
||||
|
||||
export ORACLE_PDB_SID=REEK
|
||||
SQL> create or replace directory XTTS as '/mnt/unprotected/tmp/oracle/xtts';
|
||||
|
||||
Export metadata
|
||||
|
||||
expdp userid="'/ as sysdba'" dumpfile=XTTS:metadata.dmp logfile=XTTS:metadata.log FULL=y TRANSPORTABLE=always
|
||||
|
||||
### Optionally: on target, put the datafiles read-only at OS level
|
||||
|
||||
Identify OMF target datafiles:
|
||||
|
||||
```
|
||||
asmcmd -p
|
||||
cd +DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/xtts
|
||||
ls --permission
|
||||
```
|
||||
|
||||
For each datafile, set read-only permissions, for example:
|
||||
|
||||
chmod 444 +DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/xtts/*
|
||||
|
||||
If you got:
|
||||
|
||||
ORA-15304: operation requires ACCESS_CONTROL.ENABLED attribute to be TRUE (DBD ERROR: OCIStmtExecute)
|
||||
|
||||
then set following diskgroup attributes and retry.
|
||||
|
||||
```
|
||||
column dg_name format a20
|
||||
column name format a50
|
||||
column VALUE format a30
|
||||
|
||||
set lines 120
|
||||
|
||||
select
|
||||
dg.name dg_name, attr.name, attr.value
|
||||
from
|
||||
v$asm_attribute attr
|
||||
join v$asm_diskgroup dg on attr.group_number=dg.group_number
|
||||
where
|
||||
attr.name in ('compatible.rdbms','access_control.enabled')
|
||||
order by dg.name, attr.name
|
||||
/
|
||||
|
||||
|
||||
alter diskgroup DATA set attribute 'compatible.rdbms' = '19.0.0.0.0';
|
||||
alter diskgroup RECO set attribute 'compatible.rdbms' = '19.0.0.0.0';
|
||||
|
||||
alter diskgroup DATA set attribute 'access_control.enabled' = 'TRUE';
|
||||
alter diskgroup RECO set attribute 'access_control.enabled' = 'TRUE';
|
||||
```
|
||||
|
||||
> Compare number of datafiles transported and the number of datafiles of non-Oracle tablespaces
|
||||
> Check if transported tablespaces already exists on target database
|
||||
|
||||
### Metadata import and tablespace plug-in
|
||||
|
||||
Create impdp parfile `impo_metadata.par`:
|
||||
|
||||
```
|
||||
userid="/ as sysdba"
|
||||
dumpfile=XTTS:metadata.dmp
|
||||
logfile=XTTS:impo_metadata.log
|
||||
transport_datafiles=
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS1.290.1205059373,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS1.291.1205059373,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS1.298.1205060113,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS1.289.1205059373,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS2.293.1205059375,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS2.300.1205060113,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS2.292.1205059375,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS3.294.1205059381,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS3.295.1205059381,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS3.296.1205059381,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS3.297.1205059381,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/TS3.299.1205060113,
|
||||
+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/USERS.302.1205084171
|
||||
```
|
||||
|
||||
Run import:
|
||||
|
||||
impdp parfile=impo_metadata.par
|
||||
|
||||
|
||||
Bounce (restart) the PDB (or the CDB), otherwise we can get errors like:
|
||||
|
||||
```
|
||||
ORA-01114: IO error writing block to file 33 (block # 1)
|
||||
ORA-01110: data file 33:
|
||||
'+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/ts1.298.1205060113'
|
||||
ORA-27009: cannot write to file opened for read
|
||||
```
|
||||
|
||||
Put plugged tablespaces in read/write mode:
|
||||
|
||||
```
|
||||
select
|
||||
'alter tablespace '||tablespace_name||' read write;' as COMMAND
|
||||
from
|
||||
dba_tablespaces
|
||||
where
|
||||
contents not in ('UNDO','TEMPORARY') and
|
||||
tablespace_name not in ('SYSTEM','SYSAUX');
|
||||
```
|
||||
|
||||
Remove aliases in order to use only OMF datafiles:
|
||||
|
||||
```
|
||||
cd +DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/xtts
|
||||
rmalias ts1_8.dbf ts2_13.dbf... .... ...
|
||||
cd ..
|
||||
rm -rf xtts
|
||||
```
|
||||
|
||||
## Unexpected issues
|
||||
|
||||
In the metadata import step I realized I had forgotten to include the USERS tablespace in `xtt.properties`, and impdp failed with the error:
|
||||
|
||||
ORA-39352: Wrong number of TRANSPORT_DATAFILES specified: expected 13, received 12
|
||||
|
||||
Since the USERS tablespace was in read-only mode, I copied its datafile manually to the target database.
|
||||
|
||||
Identify the file number:
|
||||
|
||||
```
|
||||
SQL> select FILE_ID from dba_data_files where TABLESPACE_NAME='USERS';
|
||||
|
||||
FILE_ID
|
||||
----------
|
||||
7
|
||||
```
|
||||
|
||||
Backup datafile on source:
|
||||
|
||||
```
|
||||
run{
|
||||
set nocfau;
|
||||
backup datafile 7 format '/mnt/unprotected/tmp/oracle/xtts/%d_%U_%s_%t.bck';
|
||||
}
|
||||
```
|
||||
|
||||
Restore datafile on target:
|
||||
|
||||
```
|
||||
run {
|
||||
restore from platform 'Linux x86 64-bit'
|
||||
foreign datafile 7 format '+DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/xtts//USERS.dbf'
|
||||
from backupset '/mnt/unprotected/tmp/oracle/xtts/GREEDO_0i3t87ss_18_1_1_18_1205084060.bck';
|
||||
}
|
||||
```
|
||||
|
||||
Put the datafile in read-only mode at ASM level:
|
||||
|
||||
chmod 444 +DATA/AERONPRD/389011A6CB11A654E0635000A8C07D80/DATAFILE/USERS.302.1205084171
|
||||
|
||||
Run the impdp again.
|
||||
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Having the datafiles to plug in set read-only at ASM level allows repeating the impdp operations as many times as necessary.
|
||||
For example, to completely re-execute the impdp metadata import from the initial conditions:
|
||||
- drop new plugged tablespaces
|
||||
- drop non-Oracle-maintained users
|
||||
- run impdp metadata again
|
||||
|
||||
```
|
||||
drop tablespace TS1 including contents;
|
||||
drop tablespace TS2 including contents;
|
||||
drop tablespace TS3 including contents;
|
||||
drop tablespace USERS including contents;
|
||||
|
||||
select 'drop user '||USERNAME||' cascade;' from dba_users where ORACLE_MAINTAINED='N';
|
||||
```
|
||||
|
||||
Reference in New Issue
Block a user