2026-03-12 20:51:59
This commit is contained in:
1
bash/Readme.md
Normal file
1
bash/Readme.md
Normal file
@@ -0,0 +1 @@
|
|||||||
|
empty
|
||||||
16
bash/adrci_purge.sh
Normal file
16
bash/adrci_purge.sh
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
#!/usr/bin/bash
# Purge all Oracle ADR (Automatic Diagnostic Repository) homes.
# The AGE* constants are retention values in minutes, as expected by
# adrci "purge -age"; pick one by assigning it to PURGETARGET.
set -euo pipefail

readonly AGE1MIN=0
readonly AGE1DAYS=1440
readonly AGE7DAYS=10080
readonly AGE10DAYS=14400
readonly AGE15DAYS=21600
readonly AGE30DAYS=43200

# Retention to apply: everything older than this many minutes is purged.
PURGETARGET=$AGE1MIN

# Iterate over every ADR home reported by adrci, skipping the banner line.
# A while/read loop avoids the word-splitting pitfalls of the original
# unquoted `for f in $( ... )` construct.
adrci exec="show homes" | grep -v "ADR Homes:" | while IFS= read -r f; do
  echo "Purging ${f}:"
  adrci exec="set home ${f}; purge -age ${PURGETARGET} ;"
done
|
||||||
11
bash/backup_tiddly_notes.sh
Normal file
11
bash/backup_tiddly_notes.sh
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
#!/usr/bin/bash
# Back up the TiddlyWiki data directory as a timestamped tar.gz archive
# and prune archives older than 45 days from the backup directory.
set -euo pipefail

DIR=/app/persistent_docker/tiddlywiki
WIKI=mywiki
BACKUPDIR=/mnt/yavin4/data/a.Very_Important_Data/backup_TiddlyWiki
DATE=$(date +%Y-%m-%d_%H-%M)

cd "${DIR}" || exit 1
tar -cvf - "${WIKI}" | gzip > "${BACKUPDIR}/${WIKI}_${DATE}.tar.gz"

cd "${BACKUPDIR}" || exit 1
# BUG FIX: the pattern was single-quoted ('${WIKI}*.tar.gz'), so ${WIKI}
# was never expanded and old backups were never actually removed.
find ./ -name "${WIKI}*.tar.gz" -mtime +45 -exec rm {} \;
|
||||||
|
|
||||||
63
bash/cifs_veracrypt_resilio_start
Normal file
63
bash/cifs_veracrypt_resilio_start
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
#!/usr/bin/bash
# Wait for a remote encrypted-password file to become available, decrypt
# the CIFS and Veracrypt passwords, mount both volumes and start Resilio
# Sync. Intended to run at user login on a single-user machine.

URL_ALIVE="https://public.databasepro.fr/.secrets/alive.txt"
URL_ENC_PASS="https://public.databasepro.fr/.secrets/linux.txt"
# SECURITY NOTE(review): the decryption passphrase is hardcoded here;
# consider reading it from a root-only file or the environment instead.
SECRET="For#LinuxM1ntVer@crypt"

# Poll the "alive" URL for up to RUNTIME before giving up.
RUNTIME="1 minute"
ENDTIME=$(date -ud "$RUNTIME" +%s)

ALIVE=""
while [[ $(date -u +%s) -le $ENDTIME ]]
do
    echo -n "$(date +%H:%M:%S): waiting for remote encrypted password file.. "
    ALIVE=$(curl -s "${URL_ALIVE}")
    if [ "$ALIVE" == "yes" ]; then
        echo "OK"
        break
    fi
    echo "retrying in 10 seconds.."
    sleep 10
done

if [ "$ALIVE" != "yes" ]; then
    echo "Remote encrypted password file is not available, giving up"
    # was "exit -1": exit codes must be in 0-255
    exit 1
fi

# Get encrypted passwords JSON from URL
JSON_ENC_PASS=$(curl -s "${URL_ENC_PASS}")

# Decode JSON. "jq -r" emits the raw string value, which replaces the
# original manual stripping of the surrounding double quotes.
ENC_PASS_CIFS=$(jq -r '.cifs' <<< "${JSON_ENC_PASS}")
ENC_PASS_VERACRYPT=$(jq -r '.veracrypt' <<< "${JSON_ENC_PASS}")

# Decrypt passwords.
# NOTE(review): passing the passphrase via argv is visible in `ps`;
# acceptable on a single-user host, otherwise use "-pass file:...".
PASS_CIFS=$(echo "${ENC_PASS_CIFS}" | openssl enc -aes-256-cbc -md sha512 -a -d -pbkdf2 -iter 100000 -salt -pass pass:"${SECRET}")
PASS_VERACRYPT=$(echo "${ENC_PASS_VERACRYPT}" | openssl enc -aes-256-cbc -md sha512 -a -d -pbkdf2 -iter 100000 -salt -pass pass:"${SECRET}")

# Mount CIFS
sudo mount -t cifs //192.168.0.9/share /mnt/yavin4 -o vers=2.0,uid=smbuser,gid=smbuser,file_mode=0775,dir_mode=0775,user=vplesnila,password=${PASS_CIFS},mfsymlinks

# Mount Veracrypt volume
veracrypt --text --mount /home/vplesnila/data/veracrypt_01.volume /mnt/rslsync --pim 0 --keyfiles "" --protect-hidden no --slot 1 --password "${PASS_VERACRYPT}" --verbose

# Start Resilio Sync
systemctl --user start resilio-sync

# Show FS
df -hT
|
||||||
|
|
||||||
|
|
||||||
13
bash/cifs_veracrypt_resilio_stop
Normal file
13
bash/cifs_veracrypt_resilio_stop
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
#!/usr/bin/bash
# Tear down the stack brought up by cifs_veracrypt_resilio_start, in
# reverse order: stop the sync service first so nothing holds the
# mounts, then unmount the CIFS share and the Veracrypt volume.

# Stop Resilio Sync
systemctl --user stop resilio-sync

# Dismount CIFS
sudo umount /mnt/yavin4

# Dismount Veracrypt volume
veracrypt --text --dismount --slot 1

# Show FS
df -hT
|
||||||
19
bash/clean_vm.sh
Normal file
19
bash/clean_vm.sh
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
#!/usr/bin/bash
# Reset an Oracle lab VM to a pristine state: wipe all database admin
# files, parameter files, datafiles, recovery area, diagnostics and
# network configuration. DESTRUCTIVE - intended for throwaway lab VMs only.

# database admin and parameter-file directories
rm -rf /app/oracle/base/admin/*
rm -rf /app/oracle/base/dbs/*
rm -rf /app/oracle/base/homes/*/dbs/*

# per-home parameter-file directories for every installed release
rm -rf /app/oracle/product/11.2/dbs/*
rm -rf /app/oracle/product/12.1/dbs/*
rm -rf /app/oracle/product/12.2/dbs/*
rm -rf /app/oracle/product/19/dbs/*
rm -rf /app/oracle/product/21/dbs/*

# datafiles and recovery area
rm -rf /data/*
rm -rf /reco/*

# client diagnostics and network configuration
rm -rf /home/oracle/oradiag_oracle
rm -rf /app/oracle/base/homes/OraDB19Home1/network/admin/*

# truncate (not delete) the listener/tns configuration files
> /etc/listener.ora
> /etc/tnsnames.ora
|
||||||
|
|
||||||
135
bash/crdb.sh
Normal file
135
bash/crdb.sh
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
#!/bin/bash
# crdb.sh - create a new Oracle database by RMAN-duplicating a versioned
# template backup, then datapatch/recompile it and give it a fresh DBID.
#
# Usage: crdb -n|--name <DB_NAME> -v|--version <19|21>

function usage {
    echo "Usage: crdb -n|--name <DB_NAME> -v|--version <19|21>"
}

########
# MAIN #
########

# parameter processing
while [ "$1" != "" ]; do
    case $1 in
        -n | --name )    shift
                         DB_NAME="$1"
                         ;;
        -v | --version ) shift
                         VERSION="$1"
                         ;;
        * )              usage
                         exit 1
    esac
    shift
done

# both options are mandatory
if [ -z "${DB_NAME}" ] || [ -z "${VERSION}" ]
then
    usage
    # was "exit -2": exit codes must be in 0-255
    exit 2
fi

# version/template selection. 19 and 21 share the same layout; only the
# oratab entry (SET19/SET21) and the template directory suffix differ,
# so the two originally duplicated branches are consolidated here.
case "$VERSION" in
    19 | 21 )
. oraenv <<EOF!
SET${VERSION}
EOF!
        export ORACLE_SID=${DB_NAME}PRD
        BACKUP_DB_NAME=ASTY
        BACKUP_DIR=/mnt/yavin4/tech/oracle/orabackup/_keep_/Standalone/${VERSION}
        # stub init.ora pointing at the real spfile under $ORACLE_BASE/admin
        echo "spfile='${ORACLE_BASE}/admin/${ORACLE_SID}/spfile/spfile${ORACLE_SID}.ora'" > ${ORACLE_BASE}/dbs/init${ORACLE_SID}.ora
        ;;
esac

# BACKUP_DIR is only set when a supported version was selected
if [ -z "${BACKUP_DIR}" ]
then
    echo "No template found for this database version"
    # was "exit -1": exit codes must be in 0-255
    exit 1
fi

# admin directories creation
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/adump
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/pfile
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/spfile
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/divers

# init and spfile creation: copy the template pfile and rename the DB in it
cp ${BACKUP_DIR}/init${BACKUP_DB_NAME}PRD.ora ${ORACLE_BASE}/admin/${ORACLE_SID}/pfile/init${ORACLE_SID}.ora
sed -i -r "s/${BACKUP_DB_NAME}/${DB_NAME}/" ${ORACLE_BASE}/admin/${ORACLE_SID}/pfile/init${ORACLE_SID}.ora

echo ${ORACLE_SID}
# create the spfile and start the empty instance, ready for RMAN duplicate
sqlplus /nolog <<EOF!
connect / as sysdba
create spfile='${ORACLE_BASE}/admin/${ORACLE_SID}/spfile/spfile${ORACLE_SID}.ora' from pfile='${ORACLE_BASE}/admin/${ORACLE_SID}/pfile/init${ORACLE_SID}.ora';
startup nomount;
EOF!

# duplicate from template
rman auxiliary / <<EOF!
run
{
allocate auxiliary channel aux01 device type disk;
allocate auxiliary channel aux02 device type disk;
allocate auxiliary channel aux03 device type disk;
allocate auxiliary channel aux04 device type disk;
allocate auxiliary channel aux05 device type disk;
allocate auxiliary channel aux06 device type disk;
allocate auxiliary channel aux07 device type disk;
allocate auxiliary channel aux08 device type disk;
allocate auxiliary channel aux09 device type disk;
allocate auxiliary channel aux10 device type disk;
duplicate target database to ${DB_NAME} backup location '${BACKUP_DIR}/backupset/';
}
EOF!

# clean restart after the duplicate
sqlplus /nolog <<EOF!
connect / as sysdba
shutdown immediate;
startup;
EOF!

# datapatch & recompile
cd ${ORACLE_HOME}/OPatch
./datapatch
sqlplus /nolog <<EOF!
connect / as sysdba
@$ORACLE_HOME/rdbms/admin/utlrp
-- recompile all in PDB$SEED
alter pluggable database PDB\$SEED close immediate instances=ALL;
alter pluggable database PDB\$SEED open read write instances=ALL;
alter session set container=PDB\$SEED;
alter session set "_ORACLE_SCRIPT"=true;
@?/rdbms/admin/utlrp
alter session set "_ORACLE_SCRIPT"=false;
alter session set container=CDB\$ROOT;
alter pluggable database PDB\$SEED close immediate instances=ALL;
alter pluggable database PDB\$SEED open read only instances=ALL;
EOF!

# NEWID: give the clone its own DBID so it can coexist with the template
sqlplus /nolog <<EOF!
connect / as sysdba
shutdown immediate;
startup mount exclusive;
EOF!

nid TARGET=/ LOGFILE=/tmp/nid.log

# nid leaves the DB requiring resetlogs; open it and restart cleanly
sqlplus /nolog <<EOF!
connect / as sysdba
startup mount exclusive;
alter database open resetlogs;
shutdown immediate;
startup;
EOF!
|
||||||
142
bash/crdb.sh.2024-06-16
Normal file
142
bash/crdb.sh.2024-06-16
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
#!/bin/bash
# Archived 2024-06-16 variant of crdb.sh: create an Oracle database by
# RMAN-duplicating a versioned template backup. Implements 11.2.0.4, 19
# and 21 (usage also advertises 12.1/12.2, but no template block exists
# for them, so those fall through to the "No template found" error).

function usage {
    echo "Usage: crdb -n|--name <DB_NAME> -v|--version <11.2.0.4|12.1|12.2|19|21>"
}

########
# MAIN #
########

# parameter processing
while [ "$1" != "" ]; do
    case $1 in
        -n | --name )    shift
                         DB_NAME="$1"
                         ;;
        -v | --version ) shift
                         VERSION="$1"
                         ;;
        * )              usage
                         exit 1
    esac
    shift
done

# both options are mandatory
if [ -z "${DB_NAME}" ] || [ -z "${VERSION}" ]
then
    usage
    # NOTE(review): exit codes should be 0-255; -2 wraps to 254
    exit -2
fi

# version/template selection
if [ "$VERSION" == "21" ]; then
. oraenv <<EOF!
SET21
EOF!
    export ORACLE_SID=${DB_NAME}PRD
    BACKUP_DB_NAME=ASTY
    BACKUP_DIR=/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/Standalone/21
    # stub init.ora pointing at the real spfile under $ORACLE_BASE/admin
    echo "spfile='${ORACLE_BASE}/admin/${ORACLE_SID}/spfile/spfile${ORACLE_SID}.ora'" > ${ORACLE_BASE}/dbs/init${ORACLE_SID}.ora
fi

if [ "$VERSION" == "19" ]; then
. oraenv <<EOF!
SET19
EOF!
    export ORACLE_SID=${DB_NAME}PRD
    BACKUP_DB_NAME=ASTY
    BACKUP_DIR=/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/Standalone/19
    # NOTE(review): this branch writes the stub to ${ORACLE_HOME}/dbs while
    # the 21 branch writes to ${ORACLE_BASE}/dbs - confirm which is intended
    echo "spfile='${ORACLE_BASE}/admin/${ORACLE_SID}/spfile/spfile${ORACLE_SID}.ora'" > ${ORACLE_HOME}/dbs/init${ORACLE_SID}.ora
fi

if [ "$VERSION" == "11.2.0.4" ]; then
. oraenv <<EOF!
SET112
EOF!
    export ORACLE_SID=${DB_NAME}PRD
    BACKUP_DB_NAME=WEDGE
    BACKUP_DIR=/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/Standalone/11.2.0.4
    echo "spfile='${ORACLE_BASE}/admin/${ORACLE_SID}/spfile/spfile${ORACLE_SID}.ora'" > ${ORACLE_HOME}/dbs/init${ORACLE_SID}.ora
fi

# BACKUP_DIR is only set when a known version was selected above
if [ -z "${BACKUP_DIR}" ]
then
    echo "No template found for this database version"
    # NOTE(review): exit codes should be 0-255; -1 wraps to 255
    exit -1
fi

# admin directories creation
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/adump
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/pfile
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/spfile
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/divers

# not automaticly created in 11.2.0.4
mkdir -p /data/${ORACLE_SID}

# init and spfile creation: copy the template pfile and rename the DB in it
cp ${BACKUP_DIR}/init${BACKUP_DB_NAME}PRD.ora ${ORACLE_BASE}/admin/${ORACLE_SID}/pfile/init${ORACLE_SID}.ora
sed -i -r "s/${BACKUP_DB_NAME}/${DB_NAME}/" ${ORACLE_BASE}/admin/${ORACLE_SID}/pfile/init${ORACLE_SID}.ora

echo ${ORACLE_SID}
# create the spfile and start the empty instance, ready for RMAN duplicate
sqlplus /nolog <<EOF!
connect / as sysdba
create spfile='${ORACLE_BASE}/admin/${ORACLE_SID}/spfile/spfile${ORACLE_SID}.ora' from pfile='${ORACLE_BASE}/admin/${ORACLE_SID}/pfile/init${ORACLE_SID}.ora';
startup nomount;
EOF!

# duplicate from template
rman auxiliary / <<EOF!
run
{
allocate auxiliary channel aux01 device type disk;
allocate auxiliary channel aux02 device type disk;
allocate auxiliary channel aux03 device type disk;
allocate auxiliary channel aux04 device type disk;
allocate auxiliary channel aux05 device type disk;
allocate auxiliary channel aux06 device type disk;
allocate auxiliary channel aux07 device type disk;
allocate auxiliary channel aux08 device type disk;
allocate auxiliary channel aux09 device type disk;
allocate auxiliary channel aux10 device type disk;
duplicate target database to ${DB_NAME} backup location '${BACKUP_DIR}/backupset/';
}
EOF!

# clean restart after the duplicate
sqlplus /nolog <<EOF!
connect / as sysdba
shutdown immediate;
startup;
EOF!

# apply last installed PSU
if [ "$VERSION" == "11.2.0.4" ]; then
# pre-12c: catbundle + recompile
sqlplus /nolog <<EOF!
connect / as sysdba
@$ORACLE_HOME/rdbms/admin/catbundle psu apply
@$ORACLE_HOME/rdbms/admin/utlrp
EOF!
else
    # 12c+: datapatch, then recompile CDB root and the seed PDB
    cd ${ORACLE_HOME}/OPatch
    ./datapatch
sqlplus /nolog <<EOF!
connect / as sysdba
@$ORACLE_HOME/rdbms/admin/utlrp
-- recompile all in PDB$SEED
alter pluggable database PDB\$SEED close immediate instances=ALL;
alter pluggable database PDB\$SEED open read write instances=ALL;
alter session set container=PDB\$SEED;
alter session set "_ORACLE_SCRIPT"=true;
@?/rdbms/admin/utlrp
alter session set "_ORACLE_SCRIPT"=false;
alter session set container=CDB\$ROOT;
alter pluggable database PDB\$SEED close immediate instances=ALL;
alter pluggable database PDB\$SEED open read only instances=ALL;
EOF!
fi
|
||||||
|
|
||||||
135
bash/crdb.sh.2026-02-21
Normal file
135
bash/crdb.sh.2026-02-21
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
#!/bin/bash
# Archived 2026-02-21 variant of crdb.sh: create an Oracle database (19 or
# 21 only) by RMAN-duplicating a template backup, then datapatch/recompile
# and give it a fresh DBID with nid. Differs from the current crdb.sh only
# in the template backup location (/mnt/yavin4/tmp/_oracle_/...).

function usage {
    echo "Usage: crdb -n|--name <DB_NAME> -v|--version <19|21>"
}

########
# MAIN #
########

# parameter processing
while [ "$1" != "" ]; do
    case $1 in
        -n | --name )    shift
                         DB_NAME="$1"
                         ;;
        -v | --version ) shift
                         VERSION="$1"
                         ;;
        * )              usage
                         exit 1
    esac
    shift
done

# both options are mandatory
if [ -z "${DB_NAME}" ] || [ -z "${VERSION}" ]
then
    usage
    # NOTE(review): exit codes should be 0-255; -2 wraps to 254
    exit -2
fi

# version/template selection
if [ "$VERSION" == "21" ]; then
. oraenv <<EOF!
SET21
EOF!
    export ORACLE_SID=${DB_NAME}PRD
    BACKUP_DB_NAME=ASTY
    BACKUP_DIR=/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/Standalone/21
    # stub init.ora pointing at the real spfile under $ORACLE_BASE/admin
    echo "spfile='${ORACLE_BASE}/admin/${ORACLE_SID}/spfile/spfile${ORACLE_SID}.ora'" > ${ORACLE_BASE}/dbs/init${ORACLE_SID}.ora
fi

if [ "$VERSION" == "19" ]; then
. oraenv <<EOF!
SET19
EOF!
    export ORACLE_SID=${DB_NAME}PRD
    BACKUP_DB_NAME=ASTY
    BACKUP_DIR=/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/Standalone/19
    echo "spfile='${ORACLE_BASE}/admin/${ORACLE_SID}/spfile/spfile${ORACLE_SID}.ora'" > ${ORACLE_BASE}/dbs/init${ORACLE_SID}.ora
fi

# BACKUP_DIR is only set when a known version was selected above
if [ -z "${BACKUP_DIR}" ]
then
    echo "No template found for this database version"
    # NOTE(review): exit codes should be 0-255; -1 wraps to 255
    exit -1
fi

# admin directories creation
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/adump
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/pfile
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/spfile
mkdir -p ${ORACLE_BASE}/admin/${ORACLE_SID}/divers

# init and spfile creation: copy the template pfile and rename the DB in it
cp ${BACKUP_DIR}/init${BACKUP_DB_NAME}PRD.ora ${ORACLE_BASE}/admin/${ORACLE_SID}/pfile/init${ORACLE_SID}.ora
sed -i -r "s/${BACKUP_DB_NAME}/${DB_NAME}/" ${ORACLE_BASE}/admin/${ORACLE_SID}/pfile/init${ORACLE_SID}.ora

echo ${ORACLE_SID}
# create the spfile and start the empty instance, ready for RMAN duplicate
sqlplus /nolog <<EOF!
connect / as sysdba
create spfile='${ORACLE_BASE}/admin/${ORACLE_SID}/spfile/spfile${ORACLE_SID}.ora' from pfile='${ORACLE_BASE}/admin/${ORACLE_SID}/pfile/init${ORACLE_SID}.ora';
startup nomount;
EOF!

# duplicate from template
rman auxiliary / <<EOF!
run
{
allocate auxiliary channel aux01 device type disk;
allocate auxiliary channel aux02 device type disk;
allocate auxiliary channel aux03 device type disk;
allocate auxiliary channel aux04 device type disk;
allocate auxiliary channel aux05 device type disk;
allocate auxiliary channel aux06 device type disk;
allocate auxiliary channel aux07 device type disk;
allocate auxiliary channel aux08 device type disk;
allocate auxiliary channel aux09 device type disk;
allocate auxiliary channel aux10 device type disk;
duplicate target database to ${DB_NAME} backup location '${BACKUP_DIR}/backupset/';
}
EOF!

# clean restart after the duplicate
sqlplus /nolog <<EOF!
connect / as sysdba
shutdown immediate;
startup;
EOF!

# datapatch & recompile
cd ${ORACLE_HOME}/OPatch
./datapatch
sqlplus /nolog <<EOF!
connect / as sysdba
@$ORACLE_HOME/rdbms/admin/utlrp
-- recompile all in PDB$SEED
alter pluggable database PDB\$SEED close immediate instances=ALL;
alter pluggable database PDB\$SEED open read write instances=ALL;
alter session set container=PDB\$SEED;
alter session set "_ORACLE_SCRIPT"=true;
@?/rdbms/admin/utlrp
alter session set "_ORACLE_SCRIPT"=false;
alter session set container=CDB\$ROOT;
alter pluggable database PDB\$SEED close immediate instances=ALL;
alter pluggable database PDB\$SEED open read only instances=ALL;
EOF!

# NEWID: give the clone its own DBID so it can coexist with the template
sqlplus /nolog <<EOF!
connect / as sysdba
shutdown immediate;
startup mount exclusive;
EOF!

nid TARGET=/ LOGFILE=/tmp/nid.log

# nid leaves the DB requiring resetlogs; open it and restart cleanly
sqlplus /nolog <<EOF!
connect / as sysdba
startup mount exclusive;
alter database open resetlogs;
shutdown immediate;
startup;
EOF!
|
||||||
2
bash/kvm_dump_guest_xml.sh
Normal file
2
bash/kvm_dump_guest_xml.sh
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
# Print (not run) a "virsh dumpxml" command for every defined KVM guest;
# pipe this script's output into a shell to actually export the XML files.
virsh list --all --name | xargs -I{} echo "virsh dumpxml {} > /mnt/yavin4/tmp/_oracle_/tmp/{}.xml"
|
||||||
|
|
||||||
7
bash/oracle_path_swgalaxy.sh
Normal file
7
bash/oracle_path_swgalaxy.sh
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
# Publish ORACLE_PATH (SQL*Plus script search path) built from the six
# gitlab code trees. Intended to be sourced; P1-P6 remain visible to the
# sourcing shell, as before.
P1=/mnt/yavin4/tmp/_oracle_/code_gitlab/oracle/tpt
P2=/mnt/yavin4/tmp/_oracle_/code_gitlab/oracle/idev
P3=/mnt/yavin4/tmp/_oracle_/code_gitlab/oracle/vg
P4=/mnt/yavin4/tmp/_oracle_/code_gitlab/oracle/vdh
P5=/mnt/yavin4/tmp/_oracle_/code_gitlab/oracle/Kevin_Meade
P6=/mnt/yavin4/tmp/_oracle_/code_gitlab/oracle/my
# Join the six components with ':' and export the result in one step.
export ORACLE_PATH="${P1}:${P2}:${P3}:${P4}:${P5}:${P6}"
|
||||||
17
bash/quemu-mount.sh
Normal file
17
bash/quemu-mount.sh
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
#!/bin/bash
# usage: qemu-mount {imagefile}
# Mounts the first partition of a QEMU raw image file on /mnt/temp by
# computing the partition's byte offset from the image's partition table.

if [ $# -ne 1 ] ; then
    echo 'usage: qemu-mount imagefile'
    echo 'Mounts a QEMU raw image file to /mnt/temp'
    exit 1
fi

# First sector of partition 1. BUG FIX: ${1} is now quoted so image paths
# containing spaces work. NOTE(review): "${1}" is still interpolated into
# the grep regex, so paths with regex metacharacters could mis-match.
start=$( fdisk -l -o Device,Start "${1}" | grep "^${1}1" | gawk '{print $2}' )
# Bytes per sector, parsed from the "Units: sectors of 1 * 512 = 512 bytes"
# line (second-to-last field).
sectors=$( fdisk -l "${1}" | grep '^Units: sectors of' | gawk '{print $(NF-1)}' )
# Byte offset of the partition inside the raw image.
offset=$(( start * sectors ))

# NOTE(review): mkdir is not run under sudo, so first use by a non-root
# user may fail to create /mnt/temp - confirm intended invocation.
[ -d /mnt/temp ] || mkdir /mnt/temp
sudo mount -o loop,offset=$offset "${1}" /mnt/temp
|
||||||
|
|
||||||
111
bash/vmclone.sh
Normal file
111
bash/vmclone.sh
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
#!/usr/bin/bash
# vmclone.sh - clone a shut-off KVM guest by generating and (optionally)
# executing a virt-clone script that maps each source disk path to a
# target path with the VM name substituted.
# NOTE(review): usage does not mention the implemented -r|--replace flag.

function usage {
echo "Usage: vmclone -s|--source <SOURCE_VM> -t|--target <TARGET_VM> -o|--scriptonly"
echo "       o| --scriptonly, only clone script is generated, the clone command will not be executed"
}

########
# MAIN #
########

typeset SOURCE_VM
typeset TARGET_VM
typeset SCRIPTONLY
typeset CLONESCRIPT="/tmp/kvmclone.sh"   # generated virt-clone wrapper
typeset -i DISK_COUNT_SOURCE             # number of non-cdrom file-backed disks
typeset -i I
typeset RC
typeset REPLACE

# parameter processing
while [ "$1" != "" ]; do
    case $1 in
        -s | --source )     shift
                            SOURCE_VM="$1"
                            ;;
        -t | --target )     shift
                            TARGET_VM="$1"
                            ;;
        -o | --scriptonly ) SCRIPTONLY=true
                            ;;
        -r | --replace )    REPLACE=true
                            ;;
        -h | --help )       usage
                            exit
                            ;;
        * )                 usage
                            exit 1
    esac
    shift
done

# both VM names are mandatory
if [ -z "$SOURCE_VM" ] || [ -z "$TARGET_VM" ]; then
    usage
    exit
fi

# check if source VM exists
# NOTE(review): grep matches substrings, so a VM whose name contains
# $SOURCE_VM would also count - confirm names are unambiguous
I=$(virsh list --all | grep $SOURCE_VM | wc -l)
if (( I == 0 )); then
    echo "Source VM ($SOURCE_VM) does not exists"
    exit 1
fi

# check if source VM is shut off (cloning a running guest is unsafe)
RC=$(virsh domstate $SOURCE_VM)
if [ "$RC" != "shut off" ]; then
    echo "Source VM ($SOURCE_VM) is $RC, please shut it down first"
    exit 1
fi

# check if target VM exists
I=$(virsh list --all | grep $TARGET_VM | wc -l)
if (( I > 0 )); then
    # -r|--replace option was used
    if [ "$REPLACE" == true ] ; then
        # destroy the VM and the underlying storage
        echo "Shutdown VM $TARGET_VM"
        virsh destroy $TARGET_VM > /dev/null 2>&1
        echo "Remove VM $TARGET_VM"
        virsh undefine $TARGET_VM --remove-all-storage > /dev/null 2>&1
    else
        echo "Target VM ($TARGET_VM) already exists, use -r|--replace option to replace it"
        exit 1
    fi
fi

# generate KVM clone shell
rm -rf $CLONESCRIPT
echo -e "#!/usr/bin/bash"'\n' >> $CLONESCRIPT
chmod +x $CLONESCRIPT

# virt-clone header: source and target names; disks are appended below
echo "virt-clone \\" >> $CLONESCRIPT
echo " --original $SOURCE_VM \\" >> $CLONESCRIPT
echo " --name $TARGET_VM \\" >> $CLONESCRIPT

# count file-backed, non-cdrom disks so we know which --file line is last
DISK_COUNT_SOURCE=$(virsh domblklist ${SOURCE_VM} --details | grep -v cdrom | grep "file" | wc -l)

I=0
# one --file line per disk, replacing the source VM name in each path;
# every line but the last gets a trailing backslash continuation
for DISK in $(virsh domblklist ${SOURCE_VM} --details | grep -v cdrom | grep "file" | awk -F " " '{ print $4}')
do
    I=$((I+1))
    NEWDISK=${DISK/$SOURCE_VM/$TARGET_VM}
    if (( I < DISK_COUNT_SOURCE )); then
        echo " --file $NEWDISK \\" >> $CLONESCRIPT
    else
        echo " --file $NEWDISK" >> $CLONESCRIPT
    fi
done

echo "KVM clone script [$SOURCE_VM -> $TARGET_VM] has been generated: $CLONESCRIPT"

if [ "$SCRIPTONLY" != true ] ; then
    # recreate VM storage directories: extract the --file paths from the
    # generated script, reduce them to unique parent dirs, wipe and recreate
    cat $CLONESCRIPT | grep '\-\-file' | xargs -I{} echo {} | sed "s/--file//g" | sed 's/ //g' | xargs -I{} dirname {} | sort -u | xargs -I{} rm -rf {}
    cat $CLONESCRIPT | grep '\-\-file' | xargs -I{} echo {} | sed "s/--file//g" | sed 's/ //g' | xargs -I{} dirname {} | sort -u | xargs -I{} mkdir -p {}
    # Run generated KVM clone shell
    echo "Executing $CLONESCRIPT.."
    $CLONESCRIPT
fi
|
||||||
120
divers/oracle_RAT_01.md
Normal file
120
divers/oracle_RAT_01.md
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
> [Original article](https://oracle-base.com/articles/8i/resource-manager-8i)
|
||||||
|
|
||||||
|
Create application users:
|
||||||
|
|
||||||
|
create user web_user identified by "iN_j8sC#d!kX6b:_";
|
||||||
|
create user batch_user identified by "r~65ktuFYyds+P_X";
|
||||||
|
grant connect,resource to web_user;
|
||||||
|
grant connect,resource to batch_user;
|
||||||
|
|
||||||
|
|
||||||
|
Create a pending area:
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_RESOURCE_MANAGER.clear_pending_area;
|
||||||
|
DBMS_RESOURCE_MANAGER.create_pending_area;
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
|
||||||
|
Create a plan:
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_RESOURCE_MANAGER.create_plan(
|
||||||
|
plan => 'hybrid_plan',
|
||||||
|
comment => 'Plan for a combination of high and low priority tasks.');
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
|
||||||
|
Create a web and a batch consumer group:
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_RESOURCE_MANAGER.create_consumer_group(
|
||||||
|
consumer_group => 'WEB_CG',
|
||||||
|
comment => 'Web based OTLP processing - high priority');
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER.create_consumer_group(
|
||||||
|
consumer_group => 'BATCH_CG',
|
||||||
|
comment => 'Batch processing - low priority');
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
|
||||||
|
Assign the consumer groups to the plan and indicate their relative priority, remembering to add the OTHER_GROUPS plan directive:
|
||||||
|
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_RESOURCE_MANAGER.create_plan_directive (
|
||||||
|
plan => 'hybrid_plan',
|
||||||
|
group_or_subplan => 'web_cg',
|
||||||
|
comment => 'High Priority',
|
||||||
|
cpu_p1 => 80,
|
||||||
|
cpu_p2 => 0,
|
||||||
|
parallel_degree_limit_p1 => 4);
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER.create_plan_directive (
|
||||||
|
plan => 'hybrid_plan',
|
||||||
|
group_or_subplan => 'batch_cg',
|
||||||
|
comment => 'Low Priority',
|
||||||
|
cpu_p1 => 0,
|
||||||
|
cpu_p2 => 80,
|
||||||
|
parallel_degree_limit_p1 => 4);
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER.create_plan_directive(
|
||||||
|
plan => 'hybrid_plan',
|
||||||
|
group_or_subplan => 'OTHER_GROUPS',
|
||||||
|
comment => 'all other users - level 3',
|
||||||
|
cpu_p1 => 0,
|
||||||
|
cpu_p2 => 0,
|
||||||
|
cpu_p3 => 100);
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
|
||||||
|
Validate and apply the resource plan:
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_RESOURCE_MANAGER.validate_pending_area;
|
||||||
|
DBMS_RESOURCE_MANAGER.submit_pending_area;
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
|
||||||
|
Assign our users to individual consumer groups:
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
-- Assign users to consumer groups
|
||||||
|
DBMS_RESOURCE_MANAGER_PRIVS.grant_switch_consumer_group(
|
||||||
|
grantee_name => 'web_user',
|
||||||
|
consumer_group => 'web_cg',
|
||||||
|
grant_option => FALSE);
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER_PRIVS.grant_switch_consumer_group(
|
||||||
|
grantee_name => 'batch_user',
|
||||||
|
consumer_group => 'batch_cg',
|
||||||
|
grant_option => FALSE);
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER.set_initial_consumer_group('web_user', 'web_cg');
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER.set_initial_consumer_group('batch_user', 'batch_cg');
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
|
||||||
|
Connect users:
|
||||||
|
|
||||||
|
connect web_user/"iN_j8sC#d!kX6b:_"
|
||||||
|
connect batch_user/"r~65ktuFYyds+P_X"
|
||||||
|
|
||||||
|
Check `resource_consumer_group` column in `v$session`:
|
||||||
|
|
||||||
|
SELECT username, resource_consumer_group
|
||||||
|
FROM v$session
|
||||||
|
WHERE username IN ('WEB_USER','BATCH_USER');
|
||||||
|
|
||||||
|
Note that the value changes for a connected session if `RESOURCE_MANAGER_PLAN` changes at instance level:
|
||||||
|
|
||||||
|
alter system set RESOURCE_MANAGER_PLAN = 'hybrid_plan' scope=both sid='*';
|
||||||
|
alter system set RESOURCE_MANAGER_PLAN = '' scope=both sid='*';
|
||||||
|
|
||||||
93
divers/oracle_RAT_01.txt
Normal file
93
divers/oracle_RAT_01.txt
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
-- https://orabliss.blogspot.com/2016/02/oracle-rat-demo-part-1.html
-- Create two test PDBs (KENOBI, MAUL) for the RAT demo: create each one,
-- open it, persist its open state across restarts, switch into it and
-- recompile invalid objects with utlrp.

CREATE PLUGGABLE DATABASE KENOBI ADMIN USER pdbdmin IDENTIFIED BY "RunDatab1218ase#1985Go!";
alter pluggable database KENOBI open;
alter pluggable database KENOBI save state;
alter session set container=KENOBI;
show con_name

@?/rdbms/admin/utlrp


CREATE PLUGGABLE DATABASE MAUL ADMIN USER pdbdmin IDENTIFIED BY "RunDatab1218ase#1985Go!";
alter pluggable database MAUL open;
alter pluggable database MAUL save state;
alter session set container=MAUL;
show con_name

@?/rdbms/admin/utlrp
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
run
|
||||||
|
{
|
||||||
|
set nocfau;
|
||||||
|
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/rat1/JEDIPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch02 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/rat1/JEDIPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch03 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/rat1/JEDIPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch04 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/rat1/JEDIPRD/%d_%U_%s_%t.bck';
|
||||||
|
backup as compressed backupset incremental level 0 database section size 2G include current controlfile plus archivelog delete input;
|
||||||
|
release channel ch01;
|
||||||
|
release channel ch02;
|
||||||
|
release channel ch03;
|
||||||
|
release channel ch04;
|
||||||
|
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/rat1/JEDIPRD/%d_%U_%s_%t.controlfile';
|
||||||
|
backup current controlfile;
|
||||||
|
release channel ch01;
|
||||||
|
}
|
||||||
|
|
||||||
|
run
|
||||||
|
{
|
||||||
|
set nocfau;
|
||||||
|
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/rat1/SITHPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch02 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/rat1/SITHPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch03 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/rat1/SITHPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch04 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/rat1/SITHPRD/%d_%U_%s_%t.bck';
|
||||||
|
backup as compressed backupset incremental level 0 database section size 2G include current controlfile plus archivelog delete input;
|
||||||
|
release channel ch01;
|
||||||
|
release channel ch02;
|
||||||
|
release channel ch03;
|
||||||
|
release channel ch04;
|
||||||
|
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/rat1/SITHPRD/%d_%U_%s_%t.controlfile';
|
||||||
|
backup current controlfile;
|
||||||
|
release channel ch01;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
create or replace directory RAT_WORKLOAD as '/home/oracle/rat';
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
create or replace directory RAT_WORKLOAD as '/home/oracle/rat';
|
||||||
|
execute dbms_workload_capture.start_capture('RAT_CAPTURE','RAT_WORKLOAD');
|
||||||
|
-- execute Workload
|
||||||
|
execute dbms_workload_capture.finish_capture();
|
||||||
|
|
||||||
|
SQL> col name for a12
|
||||||
|
SQL> col status for a10
|
||||||
|
SQL> col dir_path for a25
|
||||||
|
SQL> set lines 300
|
||||||
|
execute dbms_workload_capture.start_capture('RAT_CAPTURE','RAT_WORKLOAD');
|
||||||
|
-- execute Workload
|
||||||
|
execute dbms_workload_capture.finish_capture();
|
||||||
|
|
||||||
|
col name for a12
|
||||||
|
col status for a10
|
||||||
|
col dir_path for a25
|
||||||
|
set lines 300
|
||||||
|
|
||||||
|
select id,name,status,start_time,end_time,connects,user_calls,dir_path from dba_workload_captures
|
||||||
|
where id = (select max(id) from dba_workload_captures) ;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
set pagesize 0 long 30000000 longchunksize 1000
|
||||||
|
select dbms_workload_capture.report(2,'TEXT') from dual;
|
||||||
|
|
||||||
|
export PATH=$ORACLE_HOME/jdk/bin:$PATH
|
||||||
|
java -classpath $ORACLE_HOME/jdbc/lib/ojdbc7.jar:$ORACLE_HOME/rdbms/jlib/dbrparser.jar:$ORACLE_HOME/rdbms/jlib/dbranalyzer.jar: oracle.dbreplay.workload.checker.CaptureChecker /home/oracle/rat jdbc:oracle:thin:@taris:1521/KENOBI
|
||||||
139
divers/oracle_autoupgrade_01.txt
Normal file
139
divers/oracle_autoupgrade_01.txt
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
export PATH=/app/oracle/product/21/jdk/bin:$PATH
|
||||||
|
export JAVA_HOME=/app/oracle/product/21/jdk
|
||||||
|
|
||||||
|
java -jar $ORACLE_HOME/rdbms/admin/autoupgrade.jar -version
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
create spfile='/app/oracle/base/admin/WEDGEPRD/spfile/spfileWEDGEPRD.ora' from pfile='/app/oracle/base/admin/WEDGEPRD/pfile/initWEDGEPRD.ora';
|
||||||
|
|
||||||
|
startup nomount;
|
||||||
|
|
||||||
|
rman auxiliary /
|
||||||
|
|
||||||
|
run
|
||||||
|
{
|
||||||
|
allocate auxiliary channel aux01 device type disk;
|
||||||
|
allocate auxiliary channel aux02 device type disk;
|
||||||
|
allocate auxiliary channel aux03 device type disk;
|
||||||
|
allocate auxiliary channel aux04 device type disk;
|
||||||
|
allocate auxiliary channel aux05 device type disk;
|
||||||
|
allocate auxiliary channel aux06 device type disk;
|
||||||
|
allocate auxiliary channel aux07 device type disk;
|
||||||
|
allocate auxiliary channel aux08 device type disk;
|
||||||
|
allocate auxiliary channel aux09 device type disk;
|
||||||
|
allocate auxiliary channel aux10 device type disk;
|
||||||
|
duplicate target database to WEDGE backup location '/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/Standalone/11.2.0.4/WEDGE/';
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@$ORACLE_HOME/rdbms/admin/catbundle psu apply
|
||||||
|
@$ORACLE_HOME/rdbms/admin/utlrp
|
||||||
|
|
||||||
|
java -jar /mnt/yavin4/tmp/autoupgrade.jar -version
|
||||||
|
java -jar /mnt/yavin4/tmp/autoupgrade.jar -config /home/oracle/myconfig.cfg -clear_recovery_data
|
||||||
|
java -jar /mnt/yavin4/tmp/autoupgrade.jar -config myconfig.cfg -mode analyze
|
||||||
|
java -jar /mnt/yavin4/tmp/autoupgrade.jar -config myconfig.cfg -mode fixups
|
||||||
|
java -jar /mnt/yavin4/tmp/autoupgrade.jar -config myconfig.cfg -mode deploy
|
||||||
|
|
||||||
|
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
global.autoupg_log_dir=/home/oracle
|
||||||
|
|
||||||
|
upg1.sid=WEDGEPRD # ORACLE_SID of the source DB/CDB
|
||||||
|
upg1.source_home=/app/oracle/product/11.2 # Path of the source ORACLE_HOME
|
||||||
|
upg1.target_home=/app/oracle/product/19 # Path of the target ORACLE_HOME
|
||||||
|
upg1.start_time=NOW # Optional. [NOW | +XhYm (X hours, Y minutes after launch) | dd/mm/yyyy hh:mm:ss]
|
||||||
|
upg1.upgrade_node=taris.swgalaxy # Optional. To find out the name of your node, run the hostname utility. Default is 'localhost'
|
||||||
|
upg1.run_utlrp=yes # Optional. Whether or not to run utlrp after upgrade
|
||||||
|
upg1.timezone_upg=yes # Optional. Whether or not to run the timezone upgrade
|
||||||
|
upg1.target_version=19 # Oracle version of the target ORACLE_HOME. Only required when the target Oracle database version is 12.2
|
||||||
|
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
create spfile='/app/oracle/base/admin/ASTYPRD/spfile/spfileASTYPRD.ora' from pfile='/app/oracle/base/admin/ASTYPRD/pfile/initASTYPRD.ora';
|
||||||
|
|
||||||
|
startup nomount;
|
||||||
|
|
||||||
|
rman auxiliary /
|
||||||
|
|
||||||
|
run
|
||||||
|
{
|
||||||
|
allocate auxiliary channel aux01 device type disk;
|
||||||
|
allocate auxiliary channel aux02 device type disk;
|
||||||
|
allocate auxiliary channel aux03 device type disk;
|
||||||
|
allocate auxiliary channel aux04 device type disk;
|
||||||
|
allocate auxiliary channel aux05 device type disk;
|
||||||
|
allocate auxiliary channel aux06 device type disk;
|
||||||
|
allocate auxiliary channel aux07 device type disk;
|
||||||
|
allocate auxiliary channel aux08 device type disk;
|
||||||
|
allocate auxiliary channel aux09 device type disk;
|
||||||
|
allocate auxiliary channel aux10 device type disk;
|
||||||
|
duplicate target database to ASTY backup location '/mnt/yavin4/tmp/_oracle_/orabackup/_keep_/Standalone/19.11/ASTY/';
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
cd $ORACLE_HOME/OPatch
|
||||||
|
./datapatch
|
||||||
|
|
||||||
|
@$ORACLE_HOME/rdbms/admin/utlrp
|
||||||
|
|
||||||
|
|
||||||
|
global.autoupg_log_dir=/home/oracle
|
||||||
|
|
||||||
|
#
|
||||||
|
# Database number 3 - Noncdb to PDB upgrade
|
||||||
|
#
|
||||||
|
upg3.sid=WEDGEPRD
|
||||||
|
upg3.source_home=/app/oracle/product/11.2
|
||||||
|
upg3.target_cdb=ASTYPRD
|
||||||
|
upg3.target_home=/app/oracle/product/19
|
||||||
|
upg3.target_pdb_name=PDBWEDGEPRD
|
||||||
|
upg3.start_time=NOW # Optional. 10 Minutes from now
|
||||||
|
upg3.upgrade_node=localhost # Optional. To find out the name of your node, run the hostname utility. Default is 'localhost'
|
||||||
|
upg3.run_utlrp=yes # Optional. Whether or not to run utlrp after upgrade
|
||||||
|
upg3.timezone_upg=yes # Optional. Whether or not to run the timezone upgrade
|
||||||
|
|
||||||
|
|
||||||
|
rman target /
|
||||||
|
|
||||||
|
run
|
||||||
|
{
|
||||||
|
set nocfau;
|
||||||
|
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/upgrade1/ASTYPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch02 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/upgrade1/ASTYPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch03 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/upgrade1/ASTYPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch04 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/upgrade1/ASTYPRD/%d_%U_%s_%t.bck';
|
||||||
|
backup as compressed backupset incremental level 0 database section size 2G include current controlfile plus archivelog delete input;
|
||||||
|
release channel ch01;
|
||||||
|
release channel ch02;
|
||||||
|
release channel ch03;
|
||||||
|
release channel ch04;
|
||||||
|
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/upgrade1/ASTYPRD/%d_%U_%s_%t.controlfile';
|
||||||
|
backup current controlfile;
|
||||||
|
release channel ch01;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
run
|
||||||
|
{
|
||||||
|
set nocfau;
|
||||||
|
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/upgrade1/WEDGEPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch02 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/upgrade1/WEDGEPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch03 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/upgrade1/WEDGEPRD/%d_%U_%s_%t.bck';
|
||||||
|
allocate channel ch04 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/upgrade1/WEDGEPRD/%d_%U_%s_%t.bck';
|
||||||
|
backup as compressed backupset incremental level 0 database section size 2G include current controlfile plus archivelog delete input;
|
||||||
|
release channel ch01;
|
||||||
|
release channel ch02;
|
||||||
|
release channel ch03;
|
||||||
|
release channel ch04;
|
||||||
|
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/upgrade1/WEDGEPRD/%d_%U_%s_%t.controlfile';
|
||||||
|
backup current controlfile;
|
||||||
|
release channel ch01;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
43
divers/oracle_clone_using_lv_snap_01.txt
Normal file
43
divers/oracle_clone_using_lv_snap_01.txt
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
On Dom0:
|
||||||
|
|
||||||
|
qemu-img create -f raw /vm/hdd0/kamino/data_03.img 20G
|
||||||
|
|
||||||
|
chown qemu:qemu /vm/hdd0/kamino/data_03.img
|
||||||
|
chmod 600 /vm/hdd0/kamino/data_03.img
|
||||||
|
|
||||||
|
virsh attach-disk kamino /vm/hdd0/kamino/data_03.img vdh --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||||
|
|
||||||
|
|
||||||
|
qemu-img create -f raw /vm/hdd0/kamino/fra_02.img 20G
|
||||||
|
|
||||||
|
chown qemu:qemu /vm/hdd0/kamino/fra_02.img
|
||||||
|
chmod 600 /vm/hdd0/kamino/fra_02.img
|
||||||
|
|
||||||
|
virsh attach-disk kamino /vm/hdd0/kamino/fra_02.img vdi --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||||
|
|
||||||
|
|
||||||
|
On VM:
|
||||||
|
|
||||||
|
pvcreate /dev/vdh
|
||||||
|
pvcreate /dev/vdi
|
||||||
|
|
||||||
|
vgextend vg_data /dev/vdh
|
||||||
|
vgextend vg_fra /dev/vdi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
lvcreate -s -n lv_snap_data -L 19G vg_data/lv_data
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
mkdir /snap_data /snap_fra
|
||||||
|
|
||||||
|
# On XFS add options: rw,nouuid, otherwise the mount fails
|
||||||
|
mount -o rw,nouuid /dev/vg_data/lv_snap_data /snap_data
|
||||||
|
|
||||||
|
# try: xfs_admin -U generate /dev/vg_data/lv_snap_data
|
||||||
|
|
||||||
|
umount /snap_data
|
||||||
|
lvremove vg_data/lv_snap_data
|
||||||
|
|
||||||
128
divers/oracle_ressource_manager_01.txt
Normal file
128
divers/oracle_ressource_manager_01.txt
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
https://oracle-base.com/articles/8i/resource-manager-8i
|
||||||
|
|
||||||
|
|
||||||
|
orapwd file=orapwHUTTPRD password="Urezesf7754#hhY7711#ab?"
|
||||||
|
|
||||||
|
|
||||||
|
create pluggable database DURGA admin user HUTTMASTER identified by "Ngfsf554#hhAZAR1#10!";
|
||||||
|
alter pluggable database DURGA open;
|
||||||
|
alter pluggable database DURGA save state;
|
||||||
|
|
||||||
|
alter session set container=DURGA;
|
||||||
|
show con_name
|
||||||
|
grant sysdba to starkiller identified by "VvvAv0332#00911HsqeZA?";
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
alias HUTTPRD='rlwrap sqlplus sys/"Urezesf7754#hhY7711#ab?"@taris/HUTTPRD as sysdba'
|
||||||
|
alias DURGA='rlwrap sqlplus starkiller/"VvvAv0332#00911HsqeZA?"@taris/DURGA as sysdba'
|
||||||
|
|
||||||
|
|
||||||
|
CREATE USER web_user identified by "iN_j8sC#d!kX6b:_";
|
||||||
|
CREATE USER batch_user identified by "r~65ktuFYyds+P_X";
|
||||||
|
|
||||||
|
grant connect,resource to web_user;
|
||||||
|
grant connect,resource to batch_user;
|
||||||
|
|
||||||
|
# create a pending area.
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_RESOURCE_MANAGER.clear_pending_area;
|
||||||
|
DBMS_RESOURCE_MANAGER.create_pending_area;
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
# create a plan
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_RESOURCE_MANAGER.create_plan(
|
||||||
|
plan => 'hybrid_plan',
|
||||||
|
comment => 'Plan for a combination of high and low priority tasks.');
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
Create a web and a batch consumer group
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_RESOURCE_MANAGER.create_consumer_group(
|
||||||
|
consumer_group => 'WEB_CG',
|
||||||
|
comment => 'Web based OTLP processing - high priority');
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER.create_consumer_group(
|
||||||
|
consumer_group => 'BATCH_CG',
|
||||||
|
comment => 'Batch processing - low priority');
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
# assign the consumer groups to the plan and indicate their relative priority, remembering to add the OTHER_GROUPS plan directive.
|
||||||
|
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_RESOURCE_MANAGER.create_plan_directive (
|
||||||
|
plan => 'hybrid_plan',
|
||||||
|
group_or_subplan => 'web_cg',
|
||||||
|
comment => 'High Priority',
|
||||||
|
cpu_p1 => 80,
|
||||||
|
cpu_p2 => 0,
|
||||||
|
parallel_degree_limit_p1 => 4);
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER.create_plan_directive (
|
||||||
|
plan => 'hybrid_plan',
|
||||||
|
group_or_subplan => 'batch_cg',
|
||||||
|
comment => 'Low Priority',
|
||||||
|
cpu_p1 => 0,
|
||||||
|
cpu_p2 => 80,
|
||||||
|
parallel_degree_limit_p1 => 4);
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER.create_plan_directive(
|
||||||
|
plan => 'hybrid_plan',
|
||||||
|
group_or_subplan => 'OTHER_GROUPS',
|
||||||
|
comment => 'all other users - level 3',
|
||||||
|
cpu_p1 => 0,
|
||||||
|
cpu_p2 => 0,
|
||||||
|
cpu_p3 => 100);
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
# validate and apply the resource plan.
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_RESOURCE_MANAGER.validate_pending_area;
|
||||||
|
DBMS_RESOURCE_MANAGER.submit_pending_area;
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
|
||||||
|
# assign our users to individual consumer groups.
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
-- Assign users to consumer groups
|
||||||
|
DBMS_RESOURCE_MANAGER_PRIVS.grant_switch_consumer_group(
|
||||||
|
grantee_name => 'web_user',
|
||||||
|
consumer_group => 'web_cg',
|
||||||
|
grant_option => FALSE);
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER_PRIVS.grant_switch_consumer_group(
|
||||||
|
grantee_name => 'batch_user',
|
||||||
|
consumer_group => 'batch_cg',
|
||||||
|
grant_option => FALSE);
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER.set_initial_consumer_group('web_user', 'web_cg');
|
||||||
|
|
||||||
|
DBMS_RESOURCE_MANAGER.set_initial_consumer_group('batch_user', 'batch_cg');
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
connect web_user/"iN_j8sC#d!kX6b:_"@taris/DURGA
|
||||||
|
connect batch_user/"r~65ktuFYyds+P_X"@taris/DURGA
|
||||||
|
|
||||||
|
SELECT username, resource_consumer_group
|
||||||
|
FROM v$session
|
||||||
|
WHERE username IN ('WEB_USER','BATCH_USER');
|
||||||
|
|
||||||
|
|
||||||
|
ALTER SYSTEM SET RESOURCE_MANAGER_PLAN = 'hybrid_plan';
|
||||||
|
|
||||||
62
divers/sql_quarantine_01.md
Normal file
62
divers/sql_quarantine_01.md
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
> [Original article](https://oracle-base.com/articles/19c/sql-quarantine-19c)
|
||||||
|
|
||||||
|
We can manually quarantine a statement based on SQL_ID or SQL_TEXT.
|
||||||
|
Both methods accept a PLAN_HASH_VALUE parameter, which allows us to quarantine a single execution plan.
|
||||||
|
If this is not specified, all execution plans for the statement are quarantined.
|
||||||
|
|
||||||
|
|
||||||
|
-- Quarantine all execution plans for a SQL_ID.
|
||||||
|
DECLARE
|
||||||
|
l_sql_quarantine VARCHAR2(100);
|
||||||
|
BEGIN
|
||||||
|
l_sql_quarantine := sys.DBMS_SQLQ.create_quarantine_by_sql_id(
|
||||||
|
sql_id => 'gs59hr0xtjrf8'
|
||||||
|
);
|
||||||
|
DBMS_OUTPUT.put_line('l_sql_quarantine=' || l_sql_quarantine);
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
|
||||||
|
SQL quarantine display:
|
||||||
|
|
||||||
|
set lines 256
|
||||||
|
COLUMN sql_text FORMAT A50 TRUNC
|
||||||
|
COLUMN plan_hash_value FORMAT 999999999999
|
||||||
|
COLUMN name FORMAT A30
|
||||||
|
COLUMN enabled FORMAT A3 HEAD "Ena"
|
||||||
|
COLUMN cpu_time FORMAT A10
|
||||||
|
COLUMN io_megabytes FORMAT A10
|
||||||
|
COLUMN io_requests FORMAT A10
|
||||||
|
COLUMN elapsed_time FORMAT A10
|
||||||
|
COLUMN io_logical FORMAT A10
|
||||||
|
|
||||||
|
select
|
||||||
|
name, enabled,cpu_time, io_megabytes, io_requests, elapsed_time, io_logical, plan_hash_value, sql_text
|
||||||
|
from
|
||||||
|
dba_sql_quarantine;
|
||||||
|
|
||||||
|
|
||||||
|
The ALTER_QUARANTINE procedure allows us to alter the thresholds, to make them look more like automatically generated quarantines.
|
||||||
|
We can use the procedure to alter the following parameters:
|
||||||
|
|
||||||
|
- CPU_TIME
|
||||||
|
- ELAPSED_TIME
|
||||||
|
- IO_MEGABYTES
|
||||||
|
- IO_REQUESTS
|
||||||
|
- IO_LOGICAL
|
||||||
|
- ENABLED
|
||||||
|
- AUTOPURGE
|
||||||
|
|
||||||
|
Example of setting the CPU_TIME threshold for the manually created quarantines:
|
||||||
|
|
||||||
|
BEGIN
|
||||||
|
DBMS_SQLQ.alter_quarantine(
|
||||||
|
quarantine_name => 'SQL_QUARANTINE_8zpc9pwdmb8vr',
|
||||||
|
parameter_name => 'CPU_TIME',
|
||||||
|
parameter_value => '1');
|
||||||
|
END;
|
||||||
|
/
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
30
divers/tmp_script.sql
Normal file
30
divers/tmp_script.sql
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
-- Create a demo table and fill it with 1,000,000 rows of random data
-- (integers in [1, 1000) and 10-character random uppercase strings).

CREATE TABLE exemple_table (
    col1 INTEGER,
    col2 INTEGER,
    col3 INTEGER,
    col4 VARCHAR2(20)
);

DECLARE
    v_col1 INTEGER;
    v_col2 INTEGER;
    v_col3 INTEGER;
    v_col4 VARCHAR2(20);
BEGIN
    FOR i IN 1..1000000 LOOP
        v_col1 := TRUNC(DBMS_RANDOM.VALUE(1, 1000));
        v_col2 := TRUNC(DBMS_RANDOM.VALUE(1, 1000));
        v_col3 := TRUNC(DBMS_RANDOM.VALUE(1, 1000));
        v_col4 := DBMS_RANDOM.STRING('U', 10); -- 10 random uppercase characters

        INSERT INTO exemple_table (col1, col2, col3, col4)
        VALUES (v_col1, v_col2, v_col3, v_col4);

        -- Commit every 10,000 rows to keep undo/memory usage bounded
        IF MOD(i, 10000) = 0 THEN
            COMMIT;
        END IF;
    END LOOP;
    COMMIT;
END;
/
|
||||||
19
divers/vminfo.txt
Normal file
19
divers/vminfo.txt
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
bakura Oracle
|
||||||
|
togoria Oracle
|
||||||
|
wayland Oracle
|
||||||
|
|
||||||
|
exegol OEL9 - ogg21
|
||||||
|
helska OEL9 - ogg21
|
||||||
|
|
||||||
|
# raxus PostgreSQL
|
||||||
|
belasco PostgreSQL
|
||||||
|
|
||||||
|
jakku Windows 7 - Ciel Compta
|
||||||
|
utapau Windows 11 (vplesnila/secret)
|
||||||
|
|
||||||
|
seedmachine Rocky Linux 9 generic VM
|
||||||
|
ivera Rocky Linux 9 - docker
|
||||||
|
|
||||||
|
adega Windows Server 2022 SE vplesnila/Secret00!
|
||||||
|
atrisia Windows Server 2022 SE vplesnila/Secret00!
|
||||||
|
|
||||||
5
push_all
Normal file
5
push_all
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# push_all — stage everything, commit with a UTC-timestamped message,
# and push the current branch to origin/main.
NOW=$(date -u +"%Y-%m-%d %H:%M:%S" )
git add .
git commit -m "${NOW}"
git push -u origin main
|
||||||
|
|
||||||
0
python/01.py
Normal file
0
python/01.py
Normal file
128
python/DomainUpdater/DomainUpdater.2019-07-01
Normal file
128
python/DomainUpdater/DomainUpdater.2019-07-01
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
#!/app/python3/bin/python3
|
||||||
|
|
||||||
|
# vplesnila 2019-06-24: creation
|
||||||
|
# vplesnila 2019-06-25: build self.subdomains_list
|
||||||
|
# vplesnila 2019-07-01: finalize update subdomains procedure and email sending
|
||||||
|
|
||||||
|
import os
|
||||||
|
import smtplib
|
||||||
|
from email.message import EmailMessage
|
||||||
|
from email.mime.text import MIMEText
|
||||||
|
import socket
|
||||||
|
import json
|
||||||
|
import requests
|
||||||
|
import logging
|
||||||
|
|
||||||
|
# LOGGING initialization function
|
||||||
|
def start_logging(logfile):
    """Return a module-level logger that appends INFO records to *logfile*.

    The logger is configured with a file handler whose records carry a
    timestamp, the logger name and the level.
    """
    log = logging.getLogger(__name__)
    log.setLevel(logging.INFO)

    # File handler with a timestamped record format.
    file_handler = logging.FileHandler(logfile)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )

    log.addHandler(file_handler)
    return log
|
||||||
|
|
||||||
|
class DomainUpdater:
    """Keep dnsmanager.io A-records in sync with this host's public IP.

    On construction the updater loads its JSON configuration, resolves the
    current public IP, and builds the list of every A-record registered for
    the account.  ``check_subdomains()`` then rewrites the records whose
    content differs from the current IP, and ``send_email_new_ip()`` notifies
    about the change.

    NOTE(review): relies on a module-level ``logger`` created by
    ``start_logging()`` in ``__main__``.
    """

    def __init__(self, rootdir):
        # Directory containing DomainUpdater.conf (and the log file).
        self.rootdir = rootdir

        # Load configuration.
        with open(self.rootdir + "/" + "DomainUpdater.conf", "r") as f:
            self.config = json.load(f)

        # Resolve the current public IP through the configured API.
        r = requests.get(self.config["get_current_ip_api_url"])
        self.current_ip = r.json()["ip"]
        logger.info("__BEGIN_BATCH__")
        logger.info("Current IP is " + self.current_ip)

        # Build subdomain list as a list of
        # (fqdn, domain_id, subdomain_id, subdomain_ip) tuples.
        # (typo fix: comment previously said "a ist")
        self.subdomains_list = []

        self.auth = (self.config["dnsmanager_id"], self.config["dnsmanager_key"])
        self.url_base = self.config["dnsmanager_api_url_base"]

        # Fetch every domain of the account, then every record of each domain.
        url_end = "/user/domains"
        r = requests.get(self.url_base + url_end, auth = self.auth)
        domain_dict = r.json()["results"]

        for domain in domain_dict:
            domain_name = domain["domain"]
            domain_id = domain["id"]
            url_end = "/user/domain/" + str(domain["id"]) + "/records"
            r = requests.get(self.url_base + url_end, auth = self.auth)
            records_dict = r.json()["results"]
            for record_dict in records_dict:
                if record_dict["type"] == "A":
                    # Only A-records can carry the public IPv4 address.
                    subdomain_id = record_dict["id"]
                    subdomain_name = record_dict["name"]
                    subdomain_ip = record_dict["content"]
                    fqdn = "%s.%s" % (subdomain_name, domain_name)
                    record = (fqdn, domain_id, subdomain_id, subdomain_ip)
                    self.subdomains_list.append(record)
        return

    def send_email_new_ip(self):
        """Send an email (via local SMTP) listing the updated subdomains."""
        msg = EmailMessage()
        msg["Subject"] = "Your public IP changed"
        msg["From"] = "domain-updater@databasepro.fr"
        msg["To"] = "vplesnila@gmail.com"
        body = """
Hello,
You have a new public IP: %s
Following subdomains has been updated:
%s
--------------
Domain Updater
""" % (self.current_ip, "\n".join(self.updated_subdomain_list))
        msg.set_content(body)
        s = smtplib.SMTP("localhost")
        s.send_message(msg)
        s.quit()
        logger.info("Email sent to " + msg["To"])
        return

    def check_subdomains(self):
        """Update every tracked A-record whose IP differs from the current one.

        Populates ``self.updated_subdomain_list`` with the FQDNs that were
        actually rewritten.
        """
        self.updated_subdomain_list = []
        for record in self.subdomains_list:
            (fqdn, domain_id, subdomain_id, subdomain_ip) = record
            if (subdomain_ip == self.current_ip):
                logger.info(fqdn + " already set to " + subdomain_ip + ", nothing to do")
            else:
                # Typo fix: log previously said "Updatting" (the 2020 revision
                # of this script already logs "Updating").
                logger.info("Updating " + fqdn + " with the new IP value " + self.current_ip)
                self.update_subdomain(domain_id, subdomain_id)
                self.updated_subdomain_list.append(fqdn)

        logger.info("__END_BATCH__")
        return

    def update_subdomain(self, domain_id, subdomain_id):
        """PUT the current IP into the given record through the dnsmanager API."""
        url_end = "/user/domain/" + str(domain_id) + "/record/" + str(subdomain_id)
        data = json.dumps({"id": subdomain_id, "content": self.current_ip})
        headers = {'Content-Type': 'application/json'}
        requests.put(self.url_base + url_end, auth = self.auth, data = data, headers = headers)
        return
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Resolve paths relative to this script, wire up file logging,
    # then run one update batch and mail only if something changed.
    script_path = os.path.dirname(os.path.abspath(__file__))
    script_name = os.path.basename(__file__)
    logger = start_logging(script_path + '/DomainUpdater.log')
    domainupdater = DomainUpdater(script_path)
    domainupdater.check_subdomains()
    if len(domainupdater.updated_subdomain_list) > 0:
        domainupdater.send_email_new_ip()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
9
python/DomainUpdater/DomainUpdater.conf
Normal file
9
python/DomainUpdater/DomainUpdater.conf
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
{
|
||||||
|
"email_from": "domainupdater@databasepro.eu",
|
||||||
|
"email_to": "vplesnila@gmail.com",
|
||||||
|
"get_current_ip_api_url": "https://api.ipify.org?format=json",
|
||||||
|
"dnsmanager_api_url_base":"https://app.dnsmanager.io/api/v1",
|
||||||
|
"dnsmanager_id":"9422ac9d-2c62-4967-ae12-c1d15bbbe200",
|
||||||
|
"dnsmanager_key":"I9HV2Jqp1gFqMuic3zPRYW5guSQEvoyy",
|
||||||
|
"subdomain_list":"ssh.databasepro.fr,code.databasepro.fr,sabnzbd.databasepro.eu,sabnzbd.databasepro.fr,public.databasepro.fr,support.databasepro.fr"
|
||||||
|
}
|
||||||
8
python/DomainUpdater/DomainUpdater.conf.2020-12-23
Normal file
8
python/DomainUpdater/DomainUpdater.conf.2020-12-23
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{
|
||||||
|
"email_from": "domainupdater@databasepro.fr",
|
||||||
|
"email_to": "vplesnila@gmail.com",
|
||||||
|
"get_current_ip_api_url": "https://api.ipify.org?format=json",
|
||||||
|
"dnsmanager_api_url_base":"https://app.dnsmanager.io/api/v1",
|
||||||
|
"dnsmanager_id":"9422ac9d-2c62-4967-ae12-c1d15bbbe200",
|
||||||
|
"dnsmanager_key":"I9HV2Jqp1gFqMuic3zPRYW5guSQEvoyy"
|
||||||
|
}
|
||||||
9
python/DomainUpdater/DomainUpdater.conf.2021-01-15
Normal file
9
python/DomainUpdater/DomainUpdater.conf.2021-01-15
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
{
|
||||||
|
"email_from": "domainupdater@databasepro.fr",
|
||||||
|
"email_to": "vplesnila@gmail.com",
|
||||||
|
"get_current_ip_api_url": "https://api.ipify.org?format=json",
|
||||||
|
"dnsmanager_api_url_base":"https://app.dnsmanager.io/api/v1",
|
||||||
|
"dnsmanager_id":"9422ac9d-2c62-4967-ae12-c1d15bbbe200",
|
||||||
|
"dnsmanager_key":"I9HV2Jqp1gFqMuic3zPRYW5guSQEvoyy",
|
||||||
|
"subdomain_list":"None.databasepro.fr,ssh.databasepro.fr"
|
||||||
|
}
|
||||||
147
python/DomainUpdater/DomainUpdater.py
Normal file
147
python/DomainUpdater/DomainUpdater.py
Normal file
@@ -0,0 +1,147 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# vplesnila 2019-06-24: creation
|
||||||
|
# vplesnila 2019-06-25: build self.subdomains_list
|
||||||
|
# vplesnila 2019-07-01: finalize update subdomains procedure and email sending
|
||||||
|
# vplesnila 2020-12-24: add subdomain list in config file to allow updating only a subset of dnsmanager.io registered subdomains
|
||||||
|
|
||||||
|
import os
|
||||||
|
import socket
|
||||||
|
import json
|
||||||
|
import requests
|
||||||
|
import logging
|
||||||
|
import smtplib
|
||||||
|
from email.message import EmailMessage
|
||||||
|
from email.mime.text import MIMEText
|
||||||
|
|
||||||
|
|
||||||
|
# LOGGING initialization function
|
||||||
|
def start_logging(logfile):
    """Return a module-level logger that appends INFO records to *logfile*.

    The logger is configured with a file handler whose records carry a
    timestamp, the logger name and the level.
    """
    log = logging.getLogger(__name__)
    log.setLevel(logging.INFO)

    # File handler with a timestamped record format.
    file_handler = logging.FileHandler(logfile)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )

    log.addHandler(file_handler)
    return log
|
||||||
|
|
||||||
|
class DomainUpdater:
|
||||||
|
def __init__(self, rootdir):
|
||||||
|
self.rootdir = rootdir
|
||||||
|
|
||||||
|
# Load configuration
|
||||||
|
with open(self.rootdir + "/" + "DomainUpdater.conf", "r") as f:
|
||||||
|
self.config = json.load(f)
|
||||||
|
|
||||||
|
# Get current IP
|
||||||
|
r = requests.get(self.config["get_current_ip_api_url"])
|
||||||
|
self.current_ip = r.json()["ip"]
|
||||||
|
logger.info("Current public IP is " + self.current_ip)
|
||||||
|
|
||||||
|
# Build subdomain list as a ist of (subdomain name, domain_id, subdomain_id)
|
||||||
|
self.subdomains_list = []
|
||||||
|
|
||||||
|
self.auth = (self.config["dnsmanager_id"], self.config["dnsmanager_key"])
|
||||||
|
self.url_base = self.config["dnsmanager_api_url_base"]
|
||||||
|
|
||||||
|
# Get domains dictionary
|
||||||
|
url_end = "/user/domains"
|
||||||
|
r = requests.get(self.url_base + url_end, auth = self.auth)
|
||||||
|
domain_dict = r.json()["results"]
|
||||||
|
|
||||||
|
# Get fqdn list
|
||||||
|
self.fqdn_list = self.config["subdomain_list"].split(",")
|
||||||
|
logger.info("Subdomains list: " + ",".join(self.fqdn_list))
|
||||||
|
|
||||||
|
self.fqdn_to_update = []
|
||||||
|
for domain in domain_dict:
|
||||||
|
domain_name = domain["domain"]
|
||||||
|
domain_id = domain["id"]
|
||||||
|
url_end = "/user/domain/" + str(domain["id"]) + "/records"
|
||||||
|
r = requests.get(self.url_base + url_end, auth = self.auth)
|
||||||
|
records_dict = r.json()["results"]
|
||||||
|
for record_dict in records_dict:
|
||||||
|
if record_dict["type"] == "A":
|
||||||
|
# Subdomain
|
||||||
|
subdomain_id = record_dict["id"]
|
||||||
|
subdomain_name = record_dict["name"]
|
||||||
|
subdomain_ip = record_dict["content"]
|
||||||
|
fqdn = "%s.%s" % (subdomain_name, domain_name)
|
||||||
|
record = (fqdn, domain_id, subdomain_id, subdomain_ip)
|
||||||
|
if (fqdn in self.fqdn_list):
|
||||||
|
self.fqdn_to_update.append(fqdn)
|
||||||
|
self.subdomains_list.append(record)
|
||||||
|
|
||||||
|
logger.info("Updating subdomain(s): " + ",".join(self.fqdn_to_update))
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
def send_email_new_ip(self):
    """Send a notification email listing the new public IP and updated subdomains."""
    # NOTE(review): SMTP account credentials are hardcoded in source and leak
    # via VCS history — move them to the config file or environment.
    # NOTE(review): login happens on port 587 without an explicit starttls();
    # confirm the provider accepts plaintext AUTH here.
    SMTPserver = 'smtp.orange.fr'
    sender = 'Domain-Updater@databasepro.fr'
    destination = ['vplesnila@gmail.com']
    USERNAME = "plesnila.valeriu@orange.fr"
    PASSWORD = "ch1tzch1tz"

    msg = EmailMessage()
    msg["Subject"] = "Your public IP changed"
    msg["From"] = sender
    msg["To"] = destination
    msg.set_content("""
Hello,
You have a new public IP: %s
Following subdomains has been updated: %s
--------------
Domain-Updater
""" % (self.current_ip, ", ".join(self.updated_subdomain_list)))

    smtp = smtplib.SMTP(host=SMTPserver, port=587)
    smtp.set_debuglevel(False)
    smtp.login(USERNAME, PASSWORD)
    smtp.send_message(msg)
    smtp.quit()
    logger.info("Email sent to " + msg["To"])
    return
|
||||||
|
|
||||||
|
|
||||||
|
def check_subdomains(self):
    """Compare each tracked A-record against the current public IP and
    update the stale ones; records the touched fqdn's in
    self.updated_subdomain_list for the notification email."""
    self.updated_subdomain_list = []
    for fqdn, domain_id, subdomain_id, recorded_ip in self.subdomains_list:
        if recorded_ip == self.current_ip:
            logger.info(fqdn + " already set to " + recorded_ip + ", nothing to do")
        else:
            logger.info("Updating " + fqdn + " with the new IP value " + self.current_ip)
            self.update_subdomain(domain_id, subdomain_id)
            self.updated_subdomain_list.append(fqdn)
    return
|
||||||
|
|
||||||
|
def update_subdomain(self, domain_id, subdomain_id):
    """PUT the current public IP into the given dnsmanager record."""
    endpoint = self.url_base + "/user/domain/" + str(domain_id) + "/record/" + str(subdomain_id)
    payload = json.dumps({"id": subdomain_id, "content": self.current_ip})
    requests.put(endpoint, auth=self.auth, data=payload,
                 headers={'Content-Type': 'application/json'})
    return
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Run from the script's own directory so config and log live next to it.
    script_path = os.path.dirname(os.path.abspath(__file__))
    script_name = os.path.basename(__file__)

    # 'logger' is intentionally a module-level global: the class methods use it.
    logger = start_logging(script_path + '/DomainUpdater.log')
    logger.info("__BEGIN_BATCH__")

    updater = DomainUpdater(script_path)
    updater.check_subdomains()
    if updater.updated_subdomain_list:
        updater.send_email_new_ip()

    logger.info("__END_BATCH__")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
126
python/DomainUpdater/DomainUpdater.py.2020-12-23
Normal file
126
python/DomainUpdater/DomainUpdater.py.2020-12-23
Normal file
@@ -0,0 +1,126 @@
|
|||||||
|
#!/app/python/current_version/bin/python3
|
||||||
|
|
||||||
|
# vplesnila 2019-06-24: creation
|
||||||
|
# vplesnila 2019-06-25: build self.subdomains_list
|
||||||
|
# vplesnila 2019-07-01: finalize update subdomains procedure and email sending
|
||||||
|
|
||||||
|
import os
|
||||||
|
import smtplib
|
||||||
|
from email.message import EmailMessage
|
||||||
|
from email.mime.text import MIMEText
|
||||||
|
import socket
|
||||||
|
import json
|
||||||
|
import requests
|
||||||
|
import logging
|
||||||
|
|
||||||
|
# LOGGING initialization function
|
||||||
|
# LOGGING initialization function
def start_logging(logfile):
    """Return the module logger, wired to append INFO records to *logfile*."""
    log = logging.getLogger(__name__)
    log.setLevel(logging.INFO)

    # File handler with a timestamped one-line format.
    file_handler = logging.FileHandler(logfile)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

    log.addHandler(file_handler)
    return log
|
||||||
|
|
||||||
|
class DomainUpdater:
    """Keeps dnsmanager.io A-records pointed at this host's current public IP."""

    def __init__(self, rootdir):
        self.rootdir = rootdir

        # Load configuration (JSON file sitting next to the script).
        with open(self.rootdir + "/" + "DomainUpdater.conf", "r") as f:
            self.config = json.load(f)

        # Get current IP from the configured "what is my IP" web API.
        ip_reply = requests.get(self.config["get_current_ip_api_url"])
        self.current_ip = ip_reply.json()["ip"]
        # NOTE(review): 'logger' is a module-level global created under
        # __main__; importing this module standalone raises NameError.
        logger.info("Current IP is " + self.current_ip)

        # Build subdomain list as a list of
        # (fqdn, domain_id, subdomain_id, recorded_ip) tuples.
        self.subdomains_list = []
        self.auth = (self.config["dnsmanager_id"], self.config["dnsmanager_key"])
        self.url_base = self.config["dnsmanager_api_url_base"]

        # Enumerate every domain registered for this account, then every
        # A-record of each domain.
        domains_reply = requests.get(self.url_base + "/user/domains", auth=self.auth)
        for domain in domains_reply.json()["results"]:
            domain_name = domain["domain"]
            domain_id = domain["id"]
            records_reply = requests.get(
                self.url_base + "/user/domain/" + str(domain["id"]) + "/records",
                auth=self.auth)
            for rec in records_reply.json()["results"]:
                if rec["type"] == "A":
                    fqdn = "%s.%s" % (rec["name"], domain_name)
                    self.subdomains_list.append(
                        (fqdn, domain_id, rec["id"], rec["content"]))
        return

    def send_email_new_ip(self):
        """Send a notification email listing the updated subdomains."""
        msg = EmailMessage()
        msg["Subject"] = "Your public IP changed"
        msg["From"] = "domain-updater@databasepro.fr"
        msg["To"] = "vplesnila@gmail.com"
        msg.set_content("""
Hello,
You have a new public IP: %s
Following subdomains has been updated: %s
--------------
Domain Updater
""" % (self.current_ip, ", ".join(self.updated_subdomain_list)))

        # Relies on a local MTA listening on localhost:25.
        smtp = smtplib.SMTP("localhost")
        smtp.send_message(msg)
        smtp.quit()
        logger.info("Email sent to " + msg["To"])
        return

    def check_subdomains(self):
        """Update every tracked A-record whose stored IP differs from the
        current one; records the touched fqdn's in self.updated_subdomain_list."""
        self.updated_subdomain_list = []
        for fqdn, domain_id, subdomain_id, recorded_ip in self.subdomains_list:
            if recorded_ip == self.current_ip:
                logger.info(fqdn + " already set to " + recorded_ip + ", nothing to do")
            else:
                logger.info("Updating " + fqdn + " with the new IP value " + self.current_ip)
                self.update_subdomain(domain_id, subdomain_id)
                self.updated_subdomain_list.append(fqdn)
        return

    def update_subdomain(self, domain_id, subdomain_id):
        """PUT the current public IP into the given dnsmanager record."""
        endpoint = self.url_base + "/user/domain/" + str(domain_id) + "/record/" + str(subdomain_id)
        payload = json.dumps({"id": subdomain_id, "content": self.current_ip})
        requests.put(endpoint, auth=self.auth, data=payload,
                     headers={'Content-Type': 'application/json'})
        return
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Run from the script's own directory so config and log live next to it.
    script_path = os.path.dirname(os.path.abspath(__file__))
    script_name = os.path.basename(__file__)

    # 'logger' is intentionally a module-level global: the class methods use it.
    logger = start_logging(script_path + '/DomainUpdater.log')
    logger.info("__BEGIN_BATCH__")

    updater = DomainUpdater(script_path)
    updater.check_subdomains()
    if updater.updated_subdomain_list:
        updater.send_email_new_ip()

    logger.info("__END_BATCH__")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
136
python/DomainUpdater/DomainUpdater.py.2021-12-05
Normal file
136
python/DomainUpdater/DomainUpdater.py.2021-12-05
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# vplesnila 2019-06-24: creation
|
||||||
|
# vplesnila 2019-06-25: build self.subdomains_list
|
||||||
|
# vplesnila 2019-07-01: finalize update subdomains procedure and email sending
|
||||||
|
# vplesnila 2020-12-24: add subdomain list in config file to allow updating only a subset of dnsmanager.io registered subdomains
|
||||||
|
|
||||||
|
import os
|
||||||
|
import smtplib
|
||||||
|
from email.message import EmailMessage
|
||||||
|
from email.mime.text import MIMEText
|
||||||
|
import socket
|
||||||
|
import json
|
||||||
|
import requests
|
||||||
|
import logging
|
||||||
|
|
||||||
|
# LOGGING initialization function
|
||||||
|
# LOGGING initialization function
def start_logging(logfile):
    """Return the module logger, wired to append INFO records to *logfile*."""
    log = logging.getLogger(__name__)
    log.setLevel(logging.INFO)

    # File handler with a timestamped one-line format.
    file_handler = logging.FileHandler(logfile)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

    log.addHandler(file_handler)
    return log
|
||||||
|
|
||||||
|
class DomainUpdater:
    """Keeps dnsmanager.io A-records pointed at this host's current public IP.

    Only the fqdn's named in the config key ``subdomain_list`` are tracked.
    """

    def __init__(self, rootdir):
        self.rootdir = rootdir

        # Load configuration (JSON file sitting next to the script).
        with open(self.rootdir + "/" + "DomainUpdater.conf", "r") as f:
            self.config = json.load(f)

        # Get current IP from the configured "what is my IP" web API.
        ip_reply = requests.get(self.config["get_current_ip_api_url"])
        self.current_ip = ip_reply.json()["ip"]
        # NOTE(review): 'logger' is a module-level global created under
        # __main__; importing this module standalone raises NameError.
        logger.info("Current public IP is " + self.current_ip)

        # Build subdomain list as a list of
        # (fqdn, domain_id, subdomain_id, recorded_ip) tuples.
        self.subdomains_list = []
        self.auth = (self.config["dnsmanager_id"], self.config["dnsmanager_key"])
        self.url_base = self.config["dnsmanager_api_url_base"]

        # Enumerate every domain registered for this account.
        domains_reply = requests.get(self.url_base + "/user/domains", auth=self.auth)
        domain_dict = domains_reply.json()["results"]

        # Comma-separated fqdn allow-list from the config file.
        self.fqdn_list = self.config["subdomain_list"].split(",")
        logger.info("Subdomains list: " + ",".join(self.fqdn_list))

        self.fqdn_to_update = []
        for domain in domain_dict:
            domain_name = domain["domain"]
            domain_id = domain["id"]
            records_reply = requests.get(
                self.url_base + "/user/domain/" + str(domain["id"]) + "/records",
                auth=self.auth)
            for rec in records_reply.json()["results"]:
                if rec["type"] == "A":
                    fqdn = "%s.%s" % (rec["name"], domain_name)
                    record = (fqdn, domain_id, rec["id"], rec["content"])
                    # Track only records the config explicitly lists.
                    if fqdn in self.fqdn_list:
                        self.fqdn_to_update.append(fqdn)
                        self.subdomains_list.append(record)

        logger.info("Updating subdomain(s): " + ",".join(self.fqdn_to_update))
        return

    def send_email_new_ip(self):
        """Send a notification email listing the updated subdomains."""
        msg = EmailMessage()
        msg["Subject"] = "Your public IP changed"
        msg["From"] = "domain-updater@databasepro.fr"
        msg["To"] = "vplesnila@gmail.com"
        msg.set_content("""
Hello,
You have a new public IP: %s
Following subdomains has been updated: %s
--------------
Domain Updater
""" % (self.current_ip, ", ".join(self.updated_subdomain_list)))

        # Relies on a local MTA listening on localhost:25.
        smtp = smtplib.SMTP("localhost")
        smtp.send_message(msg)
        smtp.quit()
        logger.info("Email sent to " + msg["To"])
        return

    def check_subdomains(self):
        """Update every tracked A-record whose stored IP differs from the
        current one; records the touched fqdn's in self.updated_subdomain_list."""
        self.updated_subdomain_list = []
        for fqdn, domain_id, subdomain_id, recorded_ip in self.subdomains_list:
            if recorded_ip == self.current_ip:
                logger.info(fqdn + " already set to " + recorded_ip + ", nothing to do")
            else:
                logger.info("Updating " + fqdn + " with the new IP value " + self.current_ip)
                self.update_subdomain(domain_id, subdomain_id)
                self.updated_subdomain_list.append(fqdn)
        return

    def update_subdomain(self, domain_id, subdomain_id):
        """PUT the current public IP into the given dnsmanager record."""
        endpoint = self.url_base + "/user/domain/" + str(domain_id) + "/record/" + str(subdomain_id)
        payload = json.dumps({"id": subdomain_id, "content": self.current_ip})
        requests.put(endpoint, auth=self.auth, data=payload,
                     headers={'Content-Type': 'application/json'})
        return
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Run from the script's own directory so config and log live next to it.
    script_path = os.path.dirname(os.path.abspath(__file__))
    script_name = os.path.basename(__file__)

    # 'logger' is intentionally a module-level global: the class methods use it.
    logger = start_logging(script_path + '/DomainUpdater.log')
    logger.info("__BEGIN_BATCH__")

    updater = DomainUpdater(script_path)
    updater.check_subdomains()
    if updater.updated_subdomain_list:
        updater.send_email_new_ip()

    logger.info("__END_BATCH__")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
BIN
python/DomainUpdater/__pycache__/DomainUpdater.cpython-36.pyc
Normal file
BIN
python/DomainUpdater/__pycache__/DomainUpdater.cpython-36.pyc
Normal file
Binary file not shown.
2
python/Readme.md
Normal file
2
python/Readme.md
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
Directory for Python scripts
|
||||||
|
----------------------------
|
||||||
15
python/scr_cod_dec/code.py
Normal file
15
python/scr_cod_dec/code.py
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
#!/home/oracle/p3/bin/python
"""Dump every .sql script in k_scriptdir to stdout, each preceded by a
three-line '~' banner carrying the script name (reversed by decode.py)."""

import os

k_scriptdir = "/mnt/yavin4/tmp/_oracle_/gitlab2/oracle/star"

scriptlist = sorted(os.listdir(k_scriptdir))
for script in scriptlist:
    if script.endswith(".sql"):
        # Banner is exactly 4 chars wider than the name: '~ ' + name + ' ~'.
        # (idiom fix: string repetition instead of join-over-range; the
        # unused 'scriptlen' temporary is gone)
        script_underline = "~" * (len(script) + 4)
        print(f"{script_underline}\n~ {script} ~\n{script_underline}")
        # Body lines are tab-indented so decode.py can strip them back out.
        with open(k_scriptdir + "/" + script) as f:
            for script_line in f.read().splitlines():
                print(f"\t{script_line}")
|
||||||
34
python/scr_cod_dec/decode.py
Normal file
34
python/scr_cod_dec/decode.py
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
#!/home/oracle/p3/bin/python
"""Split a dump produced by code.py back into individual .sql files.

Format: a three-line banner ('~~~…', '~ name.sql ~', '~~~…') followed by
tab-indented script lines. Each script is written into `odir`.
"""

ifile = "1-3.txt"
odir = "./unpack/"

with open(ifile) as f:
    fc = f.read().splitlines()
fc_cnt = len(fc)


def is_banner_rule(line):
    """True for a full-width '~~~…~~~' banner line."""
    return line.startswith("~") and line.endswith("~")


i = 0
while i < fc_cnt - 2:
    l1, l2, l3 = fc[i], fc[i + 1], fc[i + 2]
    if (is_banner_rule(l1) and l2.startswith("~ ") and l2.endswith(".sql ~")
            and is_banner_rule(l3)):
        # Middle banner line carries the script name.
        sname = l2.replace("~", "").replace(" ", "")
        scontents = ""
        # BUG FIX: the body starts right after the 3-line banner (i + 3);
        # the old code used i + 4 and silently dropped each script's first line.
        j = i + 3
        while j < fc_cnt and not is_banner_rule(fc[j]):
            scontents += fc[j].lstrip("\t") + "\n"
            j += 1
        # BUG FIX: write unconditionally so the final script — which has no
        # banner after it — is flushed too; the old code only wrote when it
        # found a following banner and lost the last script.
        with open(f"{odir}/{sname}", "w") as out:
            out.write(scontents)
        # BUG FIX: jump past the processed body; the old fixed 'i += 2'
        # stride could step over banners starting at odd offsets and rescan
        # script bodies.
        i = j
    else:
        i += 1
|
||||||
Reference in New Issue
Block a user