2026-03-12 21:01:38
This commit is contained in:
35
tiddlywiki/001.txt
Executable file
35
tiddlywiki/001.txt
Executable file
@@ -0,0 +1,35 @@
|
||||
ALTER SYSTEM SET events 'trace[rdbms.SQL_Optimizer.*][sql:9s5u1k3vshsw4]'
|
||||
ALTER SYSTEM SET events 'trace[rdbms.SQL_Optimizer.*][sql:9s5u1k3vshsw4] off'
|
||||
|
||||
https://support.oracle.com/epmos/faces/DocumentDisplay?_afrLoop=368303807596291&id=19708342.8&_adf.ctrl-state=1b44p4xesv_237
|
||||
|
||||
README
|
||||
------
|
||||
|
||||
Name: SQL Developer SQLcl
|
||||
Desc: Oracle SQL Developer Command Line (SQLcl) is a free command line
|
||||
interface for Oracle Database. It allows you to interactively or
|
||||
batch execute SQL and PL/SQL. SQLcl provides in-line editing, statement
|
||||
completion, and command recall for a feature-rich experience, all while
|
||||
also supporting your previously written SQL*Plus scripts.
|
||||
Version: 20.4.1
|
||||
Build: 20.4.1.351.1718
|
||||
|
||||
Release Notes
|
||||
=============
|
||||
|
||||
Ansiconsole as default SQLFormat
|
||||
--------------------------------
|
||||
From SQLcl 20.2, AnsiConsole Format is on by default. This means that certain
|
||||
features will not work as expected until the format is set to default.
|
||||
|
||||
These include the SQL\*Plus features
|
||||
* HEADING
|
||||
* TTITLE
|
||||
* BREAK
|
||||
* COMPUTE
|
||||
|
||||
SQL> set sqlformat default.
|
||||
|
||||
If you have extensive use of SQL\*Plus style reports, you need to unset
|
||||
sqlformat via login.sql or add it to your reports
|
||||
8
tiddlywiki/002.txt
Executable file
8
tiddlywiki/002.txt
Executable file
@@ -0,0 +1,8 @@
|
||||
# ssh-pageant
|
||||
eval $(/usr/bin/ssh-pageant -r -a "/tmp/.ssh-pageant-$USERNAME")
|
||||
===================================================================
|
||||
# ssh-pageant
|
||||
# Cygwin maps /tmp to c:\cygwin\tmp
|
||||
# MinGW maps /tmp to %TEMP% (%LocalAppData%\Temp)
|
||||
# Both consume $TEMP (user-specific) from the Windows environment though.
|
||||
eval $(/usr/local/bin/ssh-pageant -ra $TEMP/.ssh-pageant)
|
||||
7
tiddlywiki/01.md
Executable file
7
tiddlywiki/01.md
Executable file
@@ -0,0 +1,7 @@
|
||||
- ala
|
||||
- a `@fef <>`
|
||||
- b `@fef <> <>`
|
||||
- c
|
||||
- bala
|
||||
- portocala
|
||||
TEST
|
||||
372
tiddlywiki/11gR2 dataguard RAC example.txt
Executable file
372
tiddlywiki/11gR2 dataguard RAC example.txt
Executable file
@@ -0,0 +1,372 @@
|
||||
== CONTEXT
|
||||
==========
|
||||
|
||||
~~ PRIMARY cluster: vortex-db01,vortex-db02
|
||||
~~ PRIMARY database: DB_NAME=GOTAL, DB_UNIQUE_NAME=GOTALPRD
|
||||
|
||||
~~ STANDBY cluster: kessel-db01,kessel-db02
|
||||
~~ STANDBY database: DB_NAME=GOTAL, DB_UNIQUE_NAME=GOTALDRP
|
||||
|
||||
|
||||
== PRIMARY database creation
|
||||
============================
|
||||
|
||||
-- in 11gR2 version, if we want different DB_NAME <> DB_UNIQUE_NAME, for example: DB_NAME=GOTAL and DB_UNIQUE_NAME=GOTALPRD
|
||||
-- we should manually create the DB_NAME directory under the data diskgroup before starting dbca
|
||||
|
||||
asmcmd mkdir +DATA/GOTAL
|
||||
|
||||
$ORACLE_HOME/bin/dbca \
|
||||
-silent \
|
||||
-createDatabase \
|
||||
-templateName General_Purpose.dbc \
|
||||
-gdbName GOTAL \
|
||||
-sid GOTALPRD \
|
||||
-initParams db_unique_name=GOTALPRD \
|
||||
-characterSet AL32UTF8 \
|
||||
-sysPassword secret \
|
||||
-systemPassword secret \
|
||||
-emConfiguration NONE \
|
||||
-storageType ASM \
|
||||
-diskGroupName DATA \
|
||||
-redoLogFileSize 100 \
|
||||
-sampleSchema FALSE \
|
||||
-totalMemory 1000 \
|
||||
-nodelist vortex-db01,vortex-db02
|
||||
|
||||
-- dbca will create 2 directory under data diskgroup: DB_NAME and DB_UNIQUE_NAME
|
||||
-- DB_NAME directory contains only a link to physical spfile in DB_UNIQUE_NAME directory
|
||||
-- DB_NAME can be suppressed if we create the spfile link directly under the DB_UNIQUE_NAME directory and we modify the database spfile parameter value in CRS
|
||||
|
||||
SQL> create pfile='/tmp/pfile.txt' from spfile='+DATA/gotal/spfilegotalprd.ora';
|
||||
ASMCMD > rm -rf +DATA/GOTAL
|
||||
SQL> create spfile='+DATA/GOTALPRD/spfilegotalprd.ora' from pfile='/tmp/pfile.txt';
|
||||
|
||||
srvctl modify database -d GOTALPRD -p +DATA/GOTALPRD/spfilegotalprd.ora
|
||||
srvctl stop database -d GOTALPRD
|
||||
srvctl start database -d GOTALPRD
|
||||
srvctl status database -d GOTALPRD -v
|
||||
|
||||
|
||||
~~ enable ARCHIVELOG mode on the PRIMARY database
|
||||
|
||||
alter system set db_recovery_file_dest_size = 4G scope=both sid='*';
|
||||
alter system set db_recovery_file_dest = '+RECO' scope=both sid='*';
|
||||
alter system set log_archive_dest_1 = 'location=USE_DB_RECOVERY_FILE_DEST' scope=both sid='*';
|
||||
|
||||
srvctl stop database -d GOTALPRD
|
||||
|
||||
startup mount exclusive
|
||||
alter database archivelog;
|
||||
alter database open;
|
||||
|
||||
srvctl stop database -d GOTALPRD
|
||||
srvctl start database -d GOTALPRD
|
||||
|
||||
alter system archive log current;
|
||||
|
||||
|
||||
== STANDBY database creation
|
||||
============================
|
||||
|
||||
- create pfile from PRIMARY spfile
|
||||
- modify pfile by replacing required values like DB_UNIQUE_NAME, INSTANCE_NAME, remote_listener etc.
|
||||
- copy pfile on a STANDBY host and test a startup nomount
|
||||
- copy the passwordfile from PRIMARY to STANDBY hosts
|
||||
|
||||
|
||||
== NETWORK configuration
|
||||
========================
|
||||
|
||||
-- listener.ora entries on vortex-db01
|
||||
# For DATAGUARD...
|
||||
SID_LIST_LISTENER_DG =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = GOTALPRD_DGMGRL)
|
||||
(SID_NAME = GOTALPRD1)
|
||||
(ORACLE_HOME = /app/oracle/product/11.2/db_1)
|
||||
)
|
||||
)
|
||||
|
||||
# ...For DATAGUARD
|
||||
|
||||
-- listener.ora entries on vortex-db02
|
||||
# For DATAGUARD...
|
||||
SID_LIST_LISTENER_DG =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = GOTALPRD_DGMGRL)
|
||||
(SID_NAME = GOTALPRD2)
|
||||
(ORACLE_HOME = /app/oracle/product/11.2/db_1)
|
||||
)
|
||||
)
|
||||
# ...For DATAGUARD
|
||||
|
||||
|
||||
-- listener.ora entries on kessel-db01
|
||||
# For DATAGUARD...
|
||||
SID_LIST_LISTENER_DG =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = GOTALDRP_DGMGRL)
|
||||
(SID_NAME = GOTALDRP1)
|
||||
(ORACLE_HOME = /app/oracle/product/11.2/db_1)
|
||||
)
|
||||
)
|
||||
|
||||
# ...For DATAGUARD
|
||||
|
||||
-- listener.ora entries on kessel-db02
|
||||
# For DATAGUARD...
|
||||
SID_LIST_LISTENER_DG =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = GOTALDRP_DGMGRL)
|
||||
(SID_NAME = GOTALDRP2)
|
||||
(ORACLE_HOME = /app/oracle/product/11.2/db_1)
|
||||
)
|
||||
)
|
||||
# ...For DATAGUARD
|
||||
|
||||
|
||||
-- GLOBAL_DBNAME value is the name of the service visible with:
|
||||
lsnrctl services LISTENER_DG
|
||||
|
||||
-- cross connection tests; we should be able to connect to idle instances too
|
||||
sqlplus /nolog
|
||||
connect sys/secret@vortex-db01-dba-vip:1541/GOTALPRD_DGMGRL as sysdba
|
||||
connect sys/secret@vortex-db02-dba-vip:1541/GOTALPRD_DGMGRL as sysdba
|
||||
connect sys/secret@kessel-db01-dba-vip:1541/GOTALDRP_DGMGRL as sysdba
|
||||
(for the moment the standby pfile/passwordfile are not deployed on second node of the standby cluster)
|
||||
|
||||
-- aliases to add on tnsnames.ora on all database nodes
|
||||
# For DATAGUARD...
|
||||
GOTALPRD_DG =
|
||||
(DESCRIPTION =
|
||||
(FAILOVER = YES)
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = vortex-db01-dba-vip)(PORT = 1541))
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = vortex-db02-dba-vip)(PORT = 1541))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = GOTALPRD_DGMGRL)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
GOTALDRP_DG =
|
||||
(DESCRIPTION =
|
||||
(FAILOVER = YES)
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = kessel-db01-dba-vip)(PORT = 1541))
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = kessel-db02-dba-vip)(PORT = 1541))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = GOTALDRP_DGMGRL)
|
||||
)
|
||||
)
|
||||
# ...For DATAGUARD
|
||||
|
||||
|
||||
-- connection test using TNS aliases
|
||||
-- we should be able to connect to idle instances
|
||||
|
||||
sqlplus /nolog
|
||||
connect sys/secret@GOTALPRD_DG as sysdba
|
||||
connect sys/secret@GOTALDRP_DG as sysdba
|
||||
|
||||
-- put the primary database in FORCE LOGGING mode
|
||||
SQL> alter database force logging;
|
||||
SQL> select force_logging from gv$database;
|
||||
|
||||
-- from the spfile of the primary DB we create an spfile for the secondary DB and we start the secondary DB in nomount
|
||||
rman target sys/secret@GOTALPRD_DG auxiliary sys/secret@GOTALDRP_DG
|
||||
run {
|
||||
allocate channel pri1 device type DISK;
|
||||
allocate channel pri2 device type DISK;
|
||||
allocate auxiliary channel aux1 device type DISK;
|
||||
allocate auxiliary channel aux2 device type DISK;
|
||||
duplicate target database
|
||||
for standby
|
||||
from active database
|
||||
nofilenamecheck;
|
||||
|
||||
}
|
||||
|
||||
~~ Dataguard Broker configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
-- on primary database
|
||||
alter system set dg_broker_start=FALSE scope=both sid='*';
|
||||
alter system set dg_broker_config_file1='+DATA/GOTALPRD/dr1GOTALPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='+DATA/GOTALPRD/dr2GOTALPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
-- on secondary database
|
||||
alter system set dg_broker_start=FALSE scope=both sid='*';
|
||||
alter system set dg_broker_config_file1='+DATA/GOTALDRP/dr1GOTALDRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='+DATA/GOTALDRP/dr2GOTALDRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
|
||||
-- creation of STANDBY REDO LOGS on both databases
|
||||
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 1 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 1 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 1 size 100M;
|
||||
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 2 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 2 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 2 size 100M;
|
||||
|
||||
|
||||
select GROUP#,THREAD#,STATUS, BYTES from v$standby_log;
|
||||
|
||||
col MEMBER for a60
|
||||
select * from v$logfile;
|
||||
|
||||
|
||||
-- create DGMGRL configuration
|
||||
dgmgrl
|
||||
DGMGRL> connect sys/secret@GOTALPRD_DG
|
||||
DGMGRL> create configuration GOTAL as
|
||||
primary database is GOTALPRD
|
||||
connect identifier is GOTALPRD_DG;
|
||||
DGMGRL> add database GOTALDRP
|
||||
as connect identifier is GOTALDRP_DG
|
||||
maintained as physical;
|
||||
|
||||
DGMGRL> edit database 'gotaldrp' set property ArchiveLagTarget=0;
|
||||
DGMGRL> edit database 'gotaldrp' set property LogArchiveMaxProcesses=2;
|
||||
DGMGRL> edit database 'gotaldrp' set property LogArchiveMinSucceedDest=1;
|
||||
DGMGRL> edit database 'gotaldrp' set property StandbyFileManagement='AUTO';
|
||||
|
||||
DGMGRL> edit database 'gotalprd' set property ArchiveLagTarget=0;
|
||||
DGMGRL> edit database 'gotalprd' set property LogArchiveMaxProcesses=2;
|
||||
DGMGRL> edit database 'gotalprd' set property LogArchiveMinSucceedDest=1;
|
||||
DGMGRL> edit database 'gotalprd' set property StandbyFileManagement='AUTO';
|
||||
|
||||
DGMGRL> enable configuration;
|
||||
DGMGRL> show configuration;
|
||||
|
||||
-- VERY IMPORTANT
|
||||
-- set StaticConnectIdentifier for all PRIMARY/DATAGUARD database instances
|
||||
-- use the complete DESCRIPTION syntax to uniquely identify the instances of each node
|
||||
|
||||
EDIT INSTANCE 'GOTALPRD1' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=vortex-db01-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=GOTALPRD_DGMGRL)(INSTANCE_NAME=GOTALPRD1)(SERVER=DEDICATED)))';
|
||||
EDIT INSTANCE 'GOTALPRD2' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=vortex-db02-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=GOTALPRD_DGMGRL)(INSTANCE_NAME=GOTALPRD2)(SERVER=DEDICATED)))';
|
||||
EDIT INSTANCE 'GOTALDRP1' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=kessel-db01-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=GOTALDRP_DGMGRL)(INSTANCE_NAME=GOTALDRP1)(SERVER=DEDICATED)))';
|
||||
EDIT INSTANCE 'GOTALDRP2' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=kessel-db02-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=GOTALDRP_DGMGRL)(INSTANCE_NAME=GOTALDRP2)(SERVER=DEDICATED)))';
|
||||
|
||||
-- move on ASM the spfile of the secondary database
|
||||
create pfile='/tmp/pfile.txt' from spfile='/app/oracle/product/11.2/db_1/dbs/spfileGOTALDRP1.ora';
|
||||
create spfile ='+DATA/gotaldrp/spfileGOTALDRP.ora' from pfile='/tmp/pfile.txt';
|
||||
|
||||
-- on secondary servers (kessel-db01 and kessel-db02)
|
||||
init.ora:
|
||||
spfile ='+DATA/gotaldrp/spfileGOTALDRP.ora'
|
||||
|
||||
-- register standby database in the CRS
|
||||
srvctl add database -d GOTALDRP -o /app/oracle/product/11.2/db_1 -c RAC -p '+DATA/gotaldrp/spfileGOTALDRP.ora' -r physical_standby -n GOTAL
|
||||
|
||||
-- pay attention to -s <start_options>; the default value is OPEN, that means that your DATAGUARD will be OPENED (active DATAGUARD)
|
||||
|
||||
srvctl add instance -d GOTALDRP -i GOTALDRP1 -n kessel-db01
|
||||
srvctl add instance -d GOTALDRP -i GOTALDRP2 -n kessel-db02
|
||||
|
||||
srvctl start database -d GOTALDRP
|
||||
srvctl status database -d GOTALDRP -v
|
||||
|
||||
|
||||
|
||||
== SWITCHOVER/SWITCHBACK
|
||||
========================
|
||||
|
||||
~~ Switchover
|
||||
~~~~~~~~~~~~~
|
||||
DGMGRL> switchover to 'gotaldrp'
|
||||
|
||||
~~ Switchback
|
||||
~~~~~~~~~~~~~
|
||||
DGMGRL> switchover to 'gotalprd'
|
||||
|
||||
|
||||
== Other operations
|
||||
===================
|
||||
|
||||
-- STOP/START Media Recovery Process (MRP) on the STANDBY
|
||||
DGMGRL> edit database 'gotalprd' set STATE='LOG-APPLY-OFF';
|
||||
DGMGRL> edit database 'gotalprd' set STATE='ONLINE';
|
||||
|
||||
== DATABASE SERVICES considerations
|
||||
==================================
|
||||
|
||||
~~ keep in mind that in a RAC environment, database services are declared in the CRS and stored in the CRS and in the database also
|
||||
~~ as the CRS are different on the PRIMARY / SECONDARY clusters, we should declare every service twice: on the PRIMARY CRS and on the SECONDARY CRS
|
||||
~~ to differentiate target status of a service along a database role
|
||||
~~ the services should be created with -l <role> option
|
||||
|
||||
~~ in the next example, we will create a GOTAL_WEB_APPLICATION service for the primary database and a GOTAL_ADHOC_REPORTING on the read-only standby
|
||||
|
||||
~~ on vortex-db01 (part of primary cluster)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
srvctl add service -d GOTALPRD -s GOTAL_WEB_APPLICATION -r "GOTALPRD1,GOTALPRD2" -P BASIC -l primary
|
||||
srvctl start service -d GOTALPRD -s GOTAL_WEB_APPLICATION
|
||||
|
||||
srvctl add service -d GOTALPRD -s GOTAL_ADHOC_REPORTING -r "GOTALPRD1,GOTALPRD2" -P BASIC -l physical_standby
|
||||
|
||||
~~ the service will be created in the database when the service is starting
|
||||
~~ for propagation on the standby, force archive of log current logfile
|
||||
|
||||
srvctl start service -d GOTALPRD -s GOTAL_ADHOC_REPORTING
|
||||
srvctl stop service -d GOTALPRD -s GOTAL_ADHOC_REPORTING
|
||||
|
||||
SQL> alter system archive log current;
|
||||
|
||||
|
||||
~~ on kessel-db01 (part of secondary cluster)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
srvctl add service -d GOTALDRP -s GOTAL_ADHOC_REPORTING -r "GOTALDRP1,GOTALDRP2" -P BASIC -l physical_standby
|
||||
srvctl start service -d GOTALDRP -s GOTAL_ADHOC_REPORTING
|
||||
|
||||
srvctl add service -d GOTALDRP -s GOTAL_WEB_APPLICATION -r "GOTALDRP1,GOTALDRP2" -P BASIC -l primary
|
||||
|
||||
~~ on CLIENT side
|
||||
~~~~~~~~~~~~~~~~~
|
||||
~~ aliases in tnsnames.ora for transparent switchover/failover
|
||||
|
||||
GOTAL_WEB_APPLICATION =
|
||||
(DESCRIPTION =
|
||||
(FAILOVER = YES)
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = vortex-scan)(PORT = 1521))
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = kessel-scan)(PORT = 1521))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVICE_NAME = GOTAL_WEB_APPLICATION)
|
||||
)
|
||||
)
|
||||
|
||||
GOTAL_ADHOC_REPORTING =
|
||||
(DESCRIPTION =
|
||||
(FAILOVER = YES)
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = kessel-scan)(PORT = 1521))
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = vortex-scan)(PORT = 1521))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVICE_NAME = GOTAL_ADHOC_REPORTING)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
282
tiddlywiki/12.1 dataguard RAC CDB example.txt
Executable file
282
tiddlywiki/12.1 dataguard RAC CDB example.txt
Executable file
@@ -0,0 +1,282 @@
|
||||
~~ creation of CDB database
|
||||
|
||||
$ORACLE_HOME/bin/dbca \
|
||||
-silent \
|
||||
-createDatabase \
|
||||
-templateName General_Purpose.dbc \
|
||||
-gdbName EWOK \
|
||||
-sid EWOKPRD \
|
||||
-initParams db_unique_name=EWOKPRD \
|
||||
-characterSet AL32UTF8 \
|
||||
-sysPassword secret \
|
||||
-systemPassword secret \
|
||||
-emConfiguration NONE \
|
||||
-createAsContainerDatabase TRUE \
|
||||
-storageType ASM \
|
||||
-diskGroupName DATA \
|
||||
-redoLogFileSize 100 \
|
||||
-sampleSchema FALSE \
|
||||
-totalMemory 2048 \
|
||||
-databaseConfType RAC \
|
||||
-nodelist vortex-db01,vortex-db02
|
||||
|
||||
|
||||
~~ identify the spfile and passwordfile ASM location and more readable aliases
|
||||
srvctl config database -d EWOKPRD
|
||||
|
||||
ASMCMD [+] > cd +DATA/EWOKPRD/
|
||||
ASMCMD [+DATA/EWOKPRD] > mkalias +DATA/EWOKPRD/PARAMETERFILE/spfile.333.957718565 spfileewokprd.ora
|
||||
ASMCMD [+DATA/EWOKPRD] > mkalias +DATA/EWOKPRD/PASSWORD/pwdewokprd.308.957717627 orapwewokprd
|
||||
|
||||
~~ update spfile location in the CRS
|
||||
srvctl modify database -db EWOKPRD -spfile +DATA/EWOKPRD/spfileewokprd.ora
|
||||
srvctl modify database -db EWOKPRD -pwfile +DATA/EWOKPRD/orapwewokprd
|
||||
srvctl stop database -d EWOKPRD
|
||||
srvctl start database -d EWOKPRD
|
||||
srvctl status database -d EWOKPRD -v
|
||||
|
||||
|
||||
~~ enable ARCHIVELOG mode and FORCE LOGGING on the PRIMARY database
|
||||
|
||||
alter system set db_recovery_file_dest_size = 4G scope=both sid='*';
|
||||
alter system set db_recovery_file_dest = '+RECO' scope=both sid='*';
|
||||
alter system set log_archive_dest_1 = 'location=USE_DB_RECOVERY_FILE_DEST' scope=both sid='*';
|
||||
|
||||
srvctl stop database -d EWOKPRD
|
||||
|
||||
startup mount exclusive
|
||||
alter database archivelog;
|
||||
alter database open;
|
||||
alter database force logging;
|
||||
|
||||
srvctl stop database -d EWOKPRD
|
||||
srvctl start database -d EWOKPRD
|
||||
|
||||
alter system archive log current;
|
||||
|
||||
~~ copy pfile and passwordfile from the primary cluster to the first node of the standby cluster
|
||||
|
||||
SQL> create pfile='/tmp/pfile_EWOK.ora' from spfile;
|
||||
asmcmd cp +DATA/EWOKPRD/orapwewokprd /tmp
|
||||
cd /tmp
|
||||
scp orapwewokprd pfile_EWOK.ora kessel-db01:/tmp
|
||||
|
||||
~~ make adjustments in pfile and put all in $ORACLE_HOME/dbs
|
||||
|
||||
SQL> create spfile from pfile='/tmp/standby.ora';
|
||||
cp orapwewokprd $ORACLE_HOME/dbs/orapwEWOKDRP1
|
||||
|
||||
SQL> startup nomount
|
||||
|
||||
~~ NETWORK configuration - listeners
|
||||
~~ in my configuration I have a dedicated listener for DATAGUARD; the following definitions have been added on the primary cluster:
|
||||
|
||||
# For DATAGUARD...
|
||||
SID_LIST_LISTENER_DG =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = EWOKPRD_DGMGRL)
|
||||
(SID_NAME = EWOKPRD1)
|
||||
(ORACLE_HOME = /app/oracle/product/12.1/db_1)
|
||||
)
|
||||
)
|
||||
|
||||
# ...For DATAGUARD
|
||||
|
||||
~~ and on standby cluster:
|
||||
|
||||
# For DATAGUARD...
|
||||
SID_LIST_LISTENER_DG =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = EWOKDRP_DGMGRL)
|
||||
(SID_NAME = EWOKDRP1)
|
||||
(ORACLE_HOME = /app/oracle/product/12.1/db_1)
|
||||
)
|
||||
)
|
||||
# ...For DATAGUARD
|
||||
|
||||
|
||||
~~ cross connection tests; we should be able to connect to idle instances too
|
||||
sqlplus /nolog
|
||||
connect sys/secret@vortex-db01-dba-vip:1541/EWOKPRD_DGMGRL as sysdba
|
||||
connect sys/secret@vortex-db02-dba-vip:1541/EWOKPRD_DGMGRL as sysdba
|
||||
connect sys/secret@kessel-db01-dba-vip:1541/EWOKDRP_DGMGRL as sysdba
|
||||
(for the moment the standby pfile/passwordfile are not deployed on second node of the standby cluster)
|
||||
|
||||
~~ aliases to add on tnsnames.ora on all primary/standby database nodes
|
||||
# For DATAGUARD...
|
||||
EWOKPRD_DG =
|
||||
(DESCRIPTION =
|
||||
(FAILOVER = YES)
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = vortex-db01-dba-vip)(PORT = 1541))
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = vortex-db02-dba-vip)(PORT = 1541))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = EWOKPRD_DGMGRL)
|
||||
)
|
||||
)
|
||||
|
||||
EWOKDRP_DG =
|
||||
(DESCRIPTION =
|
||||
(FAILOVER = YES)
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = kessel-db01-dba-vip)(PORT = 1541))
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = kessel-db02-dba-vip)(PORT = 1541))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = EWOKDRP_DGMGRL)
|
||||
)
|
||||
)
|
||||
# ...For DATAGUARD
|
||||
|
||||
|
||||
~~ cross connection test using TNS aliases; we should be able to connect to idle instances
|
||||
|
||||
sqlplus /nolog
|
||||
connect sys/secret@EWOKPRD_DG as sysdba
|
||||
connect sys/secret@EWOKDRP_DG as sysdba
|
||||
|
||||
|
||||
~~ from the spfile of the primary DB we create an spfile for the secondary DB and we start the secondary DB in nomount
|
||||
rman target sys/secret@EWOKPRD_DG auxiliary sys/secret@EWOKDRP_DG
|
||||
run {
|
||||
allocate channel pri1 device type DISK;
|
||||
allocate channel pri2 device type DISK;
|
||||
allocate auxiliary channel aux1 device type DISK;
|
||||
allocate auxiliary channel aux2 device type DISK;
|
||||
duplicate target database
|
||||
for standby
|
||||
from active database
|
||||
nofilenamecheck
|
||||
using compressed backupset section size 1G;
|
||||
}
|
||||
|
||||
|
||||
~~ Dataguard Broker configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
~~ on primary database
|
||||
alter system set dg_broker_start=FALSE scope=both sid='*';
|
||||
alter system set dg_broker_config_file1='+DATA/EWOKPRD/dr1EWOKPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='+DATA/EWOKPRD/dr2EWOKPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
~~ on secondary database
|
||||
alter system set dg_broker_start=FALSE scope=both sid='*';
|
||||
alter system set dg_broker_config_file1='+DATA/EWOKDRP/dr1EWOKDRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='+DATA/EWOKDRP/dr2EWOKDRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
~~ creation of STANDBY REDO LOGS on both databases
|
||||
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 1 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 1 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 1 size 100M;
|
||||
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 2 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 2 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 2 size 100M;
|
||||
|
||||
|
||||
select GROUP#,THREAD#,STATUS, BYTES from v$standby_log;
|
||||
|
||||
col MEMBER for a60
|
||||
select * from v$logfile;
|
||||
|
||||
|
||||
~~ create DGMGRL configuration
|
||||
dgmgrl
|
||||
DGMGRL> connect sys/secret@EWOKPRD_DG
|
||||
DGMGRL> create configuration EWOK as
|
||||
primary database is EWOKPRD
|
||||
connect identifier is EWOKPRD_DG;
|
||||
DGMGRL> add database EWOKDRP
|
||||
as connect identifier is EWOKDRP_DG
|
||||
maintained as physical;
|
||||
|
||||
DGMGRL> edit database 'ewokdrp' set property ArchiveLagTarget=0;
|
||||
DGMGRL> edit database 'ewokdrp' set property LogArchiveMaxProcesses=2;
|
||||
DGMGRL> edit database 'ewokdrp' set property LogArchiveMinSucceedDest=1;
|
||||
DGMGRL> edit database 'ewokdrp' set property StandbyFileManagement='AUTO';
|
||||
|
||||
DGMGRL> edit database 'ewokprd' set property ArchiveLagTarget=0;
|
||||
DGMGRL> edit database 'ewokprd' set property LogArchiveMaxProcesses=2;
|
||||
DGMGRL> edit database 'ewokprd' set property LogArchiveMinSucceedDest=1;
|
||||
DGMGRL> edit database 'ewokprd' set property StandbyFileManagement='AUTO';
|
||||
|
||||
DGMGRL> enable configuration;
|
||||
DGMGRL> show configuration;
|
||||
|
||||
~~ VERY IMPORTANT
|
||||
~~ set StaticConnectIdentifier for all PRIMARY/DATAGUARD database instances
|
||||
~~ use the complete DESCRIPTION syntax to uniquely identify the instances of each node
|
||||
|
||||
EDIT INSTANCE 'EWOKPRD1' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=vortex-db01-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=EWOKPRD_DGMGRL)(INSTANCE_NAME=EWOKPRD1)(SERVER=DEDICATED)))';
|
||||
EDIT INSTANCE 'EWOKPRD2' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=vortex-db02-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=EWOKPRD_DGMGRL)(INSTANCE_NAME=EWOKPRD2)(SERVER=DEDICATED)))';
|
||||
EDIT INSTANCE 'EWOKDRP1' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=kessel-db01-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=EWOKDRP_DGMGRL)(INSTANCE_NAME=EWOKDRP1)(SERVER=DEDICATED)))';
|
||||
EDIT INSTANCE 'EWOKDRP2' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=kessel-db02-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=EWOKDRP_DGMGRL)(INSTANCE_NAME=EWOKDRP2)(SERVER=DEDICATED)))';
|
||||
|
||||
|
||||
~~ move spfile from file system to ASM
|
||||
create pfile='/tmp/pfile_EWOKDRP.ora' from spfile;
|
||||
create spfile ='+DATA/ewokdrp/spfileEWOKDRP.ora' from pfile='/tmp/pfile_EWOKDRP.ora';
|
||||
|
||||
~~ register standby database in the CRS
|
||||
srvctl add database -d EWOKDRP -o /app/oracle/product/12.1/db_1 -c RAC -p '+DATA/EWOKDRP/spfileEWOKDRP.ora' -r physical_standby -n EWOK
|
||||
|
||||
~~ pay attention to -s <start_options>; the default value is OPEN, that means that your DATAGUARD will be OPENED (active DATAGUARD)
|
||||
srvctl add instance -d EWOKDRP -i EWOKDRP1 -n kessel-db01
|
||||
srvctl add instance -d EWOKDRP -i EWOKDRP2 -n kessel-db02
|
||||
|
||||
srvctl start database -d EWOKDRP -o mount
|
||||
srvctl status database -d EWOKDRP -v
|
||||
|
||||
~~ finally, move passwordfile to ASM using pwcopy under asmcmd
|
||||
~~ note that if the passwordfile is created on the DB_UNKNOW ASM directory, using --dbuniquename in pwcopy could be necessary
|
||||
asmcmd pwcopy +DATA/EWOKPRD/orapwewokprd /tmp/orapwewokprd
|
||||
scp /tmp/orapwewokprd kessel-db01:/tmp/orapwewokprd
|
||||
asmcmd pwcopy /tmp/orapwewokprd +DATA/EWOKDRP/orapwewokdrp
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
43
tiddlywiki/ASH - examples.txt
Executable file
43
tiddlywiki/ASH - examples.txt
Executable file
@@ -0,0 +1,43 @@
|
||||
col SAMPLE_TIME for a21
|
||||
col Mb for 999 999 999
|
||||
|
||||
select
|
||||
SAMPLE_TIME
|
||||
,SQL_ID
|
||||
,SESSION_ID
|
||||
,PGA_ALLOCATED/1024/1024 Mb
|
||||
,TEMP_SPACE_ALLOCATED/1024 Mb
|
||||
from
|
||||
DBA_HIST_ACTIVE_SESS_HISTORY
|
||||
where
|
||||
SAMPLE_TIME between to_date('2020-05-16 11:00','YYYY-MM-DD HH24:MI') and to_date('2020-05-16 12:00','YYYY-MM-DD HH24:MI')
|
||||
order by
|
||||
SAMPLE_TIME asc
|
||||
/
|
||||
|
||||
|
||||
|
||||
select
|
||||
max(PGA_ALLOCATED/1024/1024) Mb
|
||||
, max(TEMP_SPACE_ALLOCATED/1024) Mb
|
||||
from
|
||||
DBA_HIST_ACTIVE_SESS_HISTORY
|
||||
where
|
||||
SAMPLE_TIME between sysdate-14 and sysdate
|
||||
/
|
||||
|
||||
|
||||
select
|
||||
SAMPLE_TIME
|
||||
,SQL_ID
|
||||
,SESSION_ID
|
||||
,PGA_ALLOCATED/1024/1024 Mb
|
||||
,TEMP_SPACE_ALLOCATED/1024 Mb
|
||||
from
|
||||
DBA_HIST_ACTIVE_SESS_HISTORY
|
||||
where
|
||||
SAMPLE_TIME between sysdate-14 and sysdate
|
||||
and PGA_ALLOCATED is not null
|
||||
order by
|
||||
4 asc
|
||||
/
|
||||
44
tiddlywiki/ASH_waits_by _wait_class_for_last_2_hours.sql.txt
Executable file
44
tiddlywiki/ASH_waits_by _wait_class_for_last_2_hours.sql.txt
Executable file
@@ -0,0 +1,44 @@
|
||||
set lines 288
|
||||
col sample_time for a14
|
||||
col CONFIGURATION head "CONFIG" for 99.99
|
||||
col ADMINISTRATIVE head "ADMIN" for 99.99
|
||||
col OTHER for 99.99
|
||||
|
||||
SELECT TO_CHAR(SAMPLE_TIME, 'HH24:MI ') AS SAMPLE_TIME,
|
||||
ROUND(OTHER / 60, 3) AS OTHER,
|
||||
ROUND(CLUST / 60, 3) AS CLUST,
|
||||
ROUND(QUEUEING / 60, 3) AS QUEUEING,
|
||||
ROUND(NETWORK / 60, 3) AS NETWORK,
|
||||
ROUND(ADMINISTRATIVE / 60, 3) AS ADMINISTRATIVE,
|
||||
ROUND(CONFIGURATION / 60, 3) AS CONFIGURATION,
|
||||
ROUND(COMMIT / 60, 3) AS COMMIT,
|
||||
ROUND(APPLICATION / 60, 3) AS APPLICATION,
|
||||
ROUND(CONCURRENCY / 60, 3) AS CONCURRENCY,
|
||||
ROUND(SIO / 60, 3) AS SYSTEM_IO,
|
||||
ROUND(UIO / 60, 3) AS USER_IO,
|
||||
ROUND(SCHEDULER / 60, 3) AS SCHEDULER,
|
||||
ROUND(CPU / 60, 3) AS CPU,
|
||||
ROUND(BCPU / 60, 3) AS BACKGROUND_CPU
|
||||
FROM (SELECT TRUNC(SAMPLE_TIME, 'MI') AS SAMPLE_TIME,
|
||||
DECODE(SESSION_STATE,
|
||||
'ON CPU',
|
||||
DECODE(SESSION_TYPE, 'BACKGROUND', 'BCPU', 'ON CPU'),
|
||||
WAIT_CLASS) AS WAIT_CLASS
|
||||
FROM V$ACTIVE_SESSION_HISTORY
|
||||
WHERE SAMPLE_TIME > SYSDATE - INTERVAL '2'
|
||||
HOUR
|
||||
AND SAMPLE_TIME <= TRUNC(SYSDATE, 'MI')) ASH PIVOT(COUNT(*)
|
||||
FOR WAIT_CLASS IN('ON CPU' AS CPU,'BCPU' AS BCPU,
|
||||
'Scheduler' AS SCHEDULER,
|
||||
'User I/O' AS UIO,
|
||||
'System I/O' AS SIO,
|
||||
'Concurrency' AS CONCURRENCY,
|
||||
'Application' AS APPLICATION,
|
||||
'Commit' AS COMMIT,
|
||||
'Configuration' AS CONFIGURATION,
|
||||
'Administrative' AS ADMINISTRATIVE,
|
||||
'Network' AS NETWORK,
|
||||
'Queueing' AS QUEUEING,
|
||||
'Cluster' AS CLUST,
|
||||
'Other' AS OTHER))
|
||||
/
|
||||
28
tiddlywiki/AWR - extract a statistic history.txt
Executable file
28
tiddlywiki/AWR - extract a statistic history.txt
Executable file
@@ -0,0 +1,28 @@
|
||||
col STAT_NAME for a20
|
||||
col VALUE_DIFF for 9999999999
|
||||
col STAT_PER_MIN for 9999,999,999
|
||||
set lines 200 pages 1500 long 99999999
|
||||
col BEGIN_INTERVAL_TIME for a30
|
||||
col END_INTERVAL_TIME for a30
|
||||
set pagesize 40
|
||||
set pause on
|
||||
|
||||
|
||||
select hsys.SNAP_ID,
|
||||
hsnap.BEGIN_INTERVAL_TIME,
|
||||
hsnap.END_INTERVAL_TIME,
|
||||
hsys.STAT_NAME,
|
||||
hsys.VALUE,
|
||||
hsys.VALUE - LAG(hsys.VALUE,1,0) OVER (ORDER BY hsys.SNAP_ID) AS "VALUE_DIFF",
|
||||
round((hsys.VALUE - LAG(hsys.VALUE,1,0) OVER (ORDER BY hsys.SNAP_ID)) /
|
||||
round(abs(extract(hour from (hsnap.END_INTERVAL_TIME - hsnap.BEGIN_INTERVAL_TIME))*60 +
|
||||
extract(minute from (hsnap.END_INTERVAL_TIME - hsnap.BEGIN_INTERVAL_TIME)) +
|
||||
extract(second from (hsnap.END_INTERVAL_TIME - hsnap.BEGIN_INTERVAL_TIME))/60),1)) "STAT_PER_MIN"
|
||||
from dba_hist_sysstat hsys, dba_hist_snapshot hsnap where
|
||||
hsnap.BEGIN_INTERVAL_TIME between to_date('30-11-2019','DD-MM-YYYY') and to_date('01-12-2019','DD-MM-YYYY')
|
||||
and hsys.snap_id = hsnap.snap_id
|
||||
and hsnap.instance_number in (select instance_number from v$instance)
|
||||
and hsnap.instance_number = hsys.instance_number
|
||||
and hsys.STAT_NAME='logons current'
|
||||
order by 1;
|
||||
|
||||
19
tiddlywiki/Anglais - draft.txt
Executable file
19
tiddlywiki/Anglais - draft.txt
Executable file
@@ -0,0 +1,19 @@
|
||||
I had been in town less than a half hour.
|
||||
|
||||
scar = cicatrice
|
||||
scarce = rare
|
||||
|
||||
pick over something = to talk about something in detail
|
||||
embroider = broder
|
||||
quite = assez // I'm quite sure
|
||||
|
||||
these = ceux-ci/celles-ci
|
||||
their = leur
|
||||
neat and tidy = propre et net
|
||||
fit lean = maigre
|
||||
elbow = coude
|
||||
textbook move = comme dans un manuel, classique
|
||||
it could have ended badly = OK
|
||||
|
||||
somehow = en quelque sorte
|
||||
drawback = inconvénient
|
||||
8
tiddlywiki/Apache HTTPD.tid
Executable file
8
tiddlywiki/Apache HTTPD.tid
Executable file
@@ -0,0 +1,8 @@
|
||||
created: 20190622102026604
|
||||
creator: vplesnila
|
||||
modified: 20190622102030669
|
||||
modifier: vplesnila
|
||||
tags: Contents
|
||||
title: Apache HTTPD
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
8
tiddlywiki/Apache httpd - divers.txt
Executable file
8
tiddlywiki/Apache httpd - divers.txt
Executable file
@@ -0,0 +1,8 @@
|
||||
-- Reverse Proxy
|
||||
ProxyPass "/app/" "http://server1551:9027/"
|
||||
ProxyPassReverse "/app/" "http://server1551:9027/"
|
||||
|
||||
-- Replace content using mod_substitute module
|
||||
AddOutputFilterByType SUBSTITUTE text/html
|
||||
Substitute "s|/AdminLTE/|/dbservices-dev/AdminLTE/"
|
||||
|
||||
42
tiddlywiki/Apache httpd starting with systemd.tid
Executable file
42
tiddlywiki/Apache httpd starting with systemd.tid
Executable file
@@ -0,0 +1,42 @@
|
||||
created: 20190617091940764
|
||||
creator: vplesnila
|
||||
modified: 20200122095305456
|
||||
modifier: vplesnila
|
||||
tags: Linux [[Apache HTTPD]]
|
||||
title: Apache httpd starting with systemd
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
Create `httpd.service` unit file in `/usr/lib/systemd/system`
|
||||
|
||||
|
||||
```
|
||||
[Unit]
|
||||
Description=Apache Web Server
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
PIDFile=/app/apache_httpd/2.4.39/logs/httpd.pid
|
||||
ExecStart=/app/apache_httpd/2.4.39/bin/apachectl start
|
||||
ExecStop=/app/apache_httpd/2.4.39/bin/apachectl graceful-stop
|
||||
ExecReload=/app/apache_httpd/2.4.39/bin/apachectl graceful
|
||||
PrivateTmp=true
|
||||
LimitNOFILE=infinity
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
systemctl daemon-reload
|
||||
systemctl enable httpd
|
||||
systemctl stop httpd
|
||||
systemctl start httpd
|
||||
systemctl status httpd
|
||||
```
|
||||
|
||||
//Note//: in version 2.4.41, PIDFile in the unit definition does not work.
|
||||
|
||||
5
tiddlywiki/Bookmarks.md
Executable file
5
tiddlywiki/Bookmarks.md
Executable file
@@ -0,0 +1,5 @@
|
||||
- [Markdown Cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)
|
||||
- [Transfer a Windows 10 license to a new PC](https://www.windowscentral.com/how-transfer-windows-10-license-new-computer-or-hard-drive)
|
||||
- [Comment l'optimiseur d'Oracle calcule le coût](https://marius-nitu.developpez.com/tutoriels/oracle/optimiseur/comment-optimiseur-oracle-calcule-cout/)
|
||||
- [How to set up "RedoRoutes" in a Data Guard Broker configuration](https://minimalistic-oracle.blogspot.com/2021/04/how-to-set-up-redoroutes-in-data-guard.html)
|
||||
|
||||
35
tiddlywiki/Brico.md
Executable file
35
tiddlywiki/Brico.md
Executable file
@@ -0,0 +1,35 @@
|
||||
https://demolition-debarras.com/
|
||||
|
||||
# Scripts
|
||||
|
||||
### make-fedora-rpm.py
|
||||
|
||||
Fedora-specific script that ties it all together. Run it like:
|
||||
|
||||
./make-fedora-rpm.py
|
||||
|
||||
What it does roughly:
|
||||
|
||||
* Extracts all the .zip files in $scriptdir/new-builds/ to a temporary directory. The .zip files should contain all the build input for `make-driver-dir.py`. I prepopulate this with `fetch-latest-builds.py` but other people can use the build input mirror mentioned above.
|
||||
* Runs `make-driver-dir.py` on the unzipped output
|
||||
* Runs `make-virtio-win-rpm-archive.py` on the make-driver-dir.py output
|
||||
* Updates the virtio-win.spec
|
||||
* Runs `./make-repo.py`
|
||||
|
||||
|
||||
Jfrjfr r ng tng th t,h ,ty h
|
||||
|
||||
vfvf
|
||||
gthth
|
||||
hythyt
|
||||
|
||||
|
||||
### make-installer.py
|
||||
|
||||
This uses a [virtio-win-guest-tools-installer.git](https://github.com/virtio-win/virtio-win-guest-tools-installer]) git submodule to build .msi installers
|
||||
for all the drivers. Invoking this successfully requires quite a few RPMs installed on the host
|
||||
|
||||
* `wix-toolset-binaries`, example: https://resources.ovirt.org/pub/ovirt-master-snapshot/rpm/fc32/noarch/wix-toolset-binaries-3.11.1-2.fc32.noarch.rpm
|
||||
* `ovirt-guest-agent-windows`, example: https://resources.ovirt.org/pub/ovirt-4.3-snapshot/rpm/fc30/noarch/ovirt-guest-agent-windows-1.0.16-1.20191009081759.git1048b68.fc30.noarch.rpm
|
||||
* `wine` from distro repos
|
||||
|
||||
8
tiddlywiki/CRS resources check examples.txt
Executable file
8
tiddlywiki/CRS resources check examples.txt
Executable file
@@ -0,0 +1,8 @@
|
||||
# not started databases
|
||||
crsctl status resource -w "((TYPE = ora.database.type) AND (LAST_SERVER = $(hostname -s)) AND (STATE != ONLINE))"
|
||||
|
||||
# not started services
|
||||
crsctl status resource -w "((TYPE = ora.service.type) AND (LAST_SERVER = $(hostname -s)) AND (STATE != ONLINE))"
|
||||
|
||||
# list in tabular mode services not started but having a target=ONLINE
|
||||
crsctl status resource -t -w "((TYPE = ora.service.type) AND (LAST_SERVER = $(hostname -s)) AND (TARGET = ONLINE) AND (STATE != ONLINE))"
|
||||
24
tiddlywiki/Captures.txt
Executable file
24
tiddlywiki/Captures.txt
Executable file
@@ -0,0 +1,24 @@
|
||||
-- LIST CAPTURES
|
||||
|
||||
set lines 180
|
||||
col CAPTURE_NAME for a50
|
||||
|
||||
select CAPTURE_NAME,STATUS from dba_capture;
|
||||
|
||||
-- STOP CAPTURE
|
||||
|
||||
BEGIN
|
||||
DBMS_CAPTURE_ADM.STOP_CAPTURE(
|
||||
capture_name => 'OGG$CAP_OEDCLJJ',
|
||||
force => true);
|
||||
END;
|
||||
/
|
||||
|
||||
-- DROP CAPTURE
|
||||
|
||||
BEGIN
|
||||
DBMS_CAPTURE_ADM.DROP_CAPTURE(
|
||||
capture_name => 'OGG$CAP_OEDINJJ',
|
||||
drop_unused_rule_sets => true);
|
||||
END;
|
||||
/
|
||||
1
tiddlywiki/Change hostname in Linux.txt
Executable file
1
tiddlywiki/Change hostname in Linux.txt
Executable file
@@ -0,0 +1 @@
|
||||
hostnamectl set-hostname host.example.com
|
||||
39
tiddlywiki/Changing the Oracle Grid Infrastructure Home Path.tid
Executable file
39
tiddlywiki/Changing the Oracle Grid Infrastructure Home Path.tid
Executable file
@@ -0,0 +1,39 @@
|
||||
created: 20200218153929609
|
||||
creator: vplesnila
|
||||
modified: 20200218161821409
|
||||
modifier: vplesnila
|
||||
tags: Oracle
|
||||
title: Changing the Oracle Grid Infrastructure Home Path
|
||||
type: text/plain
|
||||
|
||||
~~ Moving ORACLE_HOME
|
||||
~~ from /app/grid/product/19.3
|
||||
~~ to /app/grid/product/19
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
~~ as root stop the CRS
|
||||
crsctl stop crs
|
||||
|
||||
~~ as grid, detach the grid ORACLE_HOME
|
||||
/app/grid/product/19.3/oui/bin/runInstaller -silent -waitforcompletion\
|
||||
-detachHome ORACLE_HOME='/app/grid/product/19.3' -local
|
||||
|
||||
~~ as root, move the Grid binaries from the old Grid home location to the new Grid home location
|
||||
cp -Rp /app/grid/product/19.3 /app/grid/product/19
|
||||
|
||||
~~ as root Unlock the destination Grid home
|
||||
/app/grid/product/19/crs/install/rootcrs.sh -unlock -dstcrshome /app/grid/product/19
|
||||
|
||||
~~ as grid relink Grid binaries
|
||||
~~ set up your environement variable according to the new ORACLE_HOME
|
||||
/app/grid/product/19/bin/relink
|
||||
|
||||
~~ as root lock the destination Grid home
|
||||
/app/grid/product/19/crs/install/rootcrs.sh -lock
|
||||
|
||||
~~ as root move Grid home to the new destination and start CRS
|
||||
/app/grid/product/19/crs/install/rootcrs.sh -move -dstcrshome /app/grid/product/19
|
||||
|
||||
~~ as grid, attach the new home in Oracle Inventory
|
||||
/app/grid/product/19/oui/bin/runInstaller -attachhome ORACLE_HOME=/app/grid/product/19 ORACLE_HOME_NAME="OraGI19Home1"
|
||||
/app/grid/product/19/OPatch/opatch lsinventory
|
||||
17
tiddlywiki/Citrix - ALT+TAB remote.tid
Executable file
17
tiddlywiki/Citrix - ALT+TAB remote.tid
Executable file
@@ -0,0 +1,17 @@
|
||||
created: 20191026073454809
|
||||
creator: vplesnila
|
||||
modified: 20191026073843159
|
||||
modifier: vplesnila
|
||||
tags: Divers
|
||||
title: Citrix - ALT+TAB remote
|
||||
type: text/plain
|
||||
|
||||
-- source: https://www.lewan.com/blog/2013/06/14/enable-alttab-application-toggling-in-a-citrix-xenapp-desktop-session
|
||||
|
||||
- Open regedit on the client device to edit the registry
|
||||
- Navigate to the key:
|
||||
HKEY_LOCAL_MACHINE \SOFTWARE\Citrix\ICAClient\Engine\Lockdown Profiles\All Regions\Lockdown\Virtual Channels\Keyboard\
|
||||
- Open Key: TransparentKeyPassthrough
|
||||
- Set the value to: Remote
|
||||
- Exit the Citrix receiver if it is started and log back into your Citrix desktop.
|
||||
- When the Citrix desktop session is the Active window, you will be able to toggle between the applications in that session with Alt+Tab.
|
||||
8
tiddlywiki/Contents.tid
Executable file
8
tiddlywiki/Contents.tid
Executable file
@@ -0,0 +1,8 @@
|
||||
created: 20190616214114844
|
||||
creator: vplesnila
|
||||
modified: 20190618155452589
|
||||
modifier: vplesnila
|
||||
title: Contents
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
<$list filter={{$:/core/Filters/AllTiddlers!!filter}} template="$:/core/ui/ListItemTemplate"/>
|
||||
132
tiddlywiki/Create RAC CDB database manually.txt
Executable file
132
tiddlywiki/Create RAC CDB database manually.txt
Executable file
@@ -0,0 +1,132 @@
|
||||
~~ Context: DBNAME=HUTT, db_unique_name=HUTTPRD, instances HUTT1/HUTT2
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
~~ NOTE: the procedure is identical to creating a non CDB database
|
||||
~~ the ONLY difference is enable_pluggable_database=true parameter in init.ora
|
||||
|
||||
mkdir -p /app/base/admin/HUTTPRD
|
||||
cd /app/base/admin/HUTTPRD
|
||||
mkdir scripts divers adump init diag
|
||||
|
||||
~~~~~~~~~~~~
|
||||
initHUTT.ora
|
||||
~~~~~~~~~~~~
|
||||
*.enable_pluggable_database=true
|
||||
*.cluster_database=false
|
||||
*.db_name=HUTT
|
||||
*.db_unique_name=HUTTPRD
|
||||
*.compatible=19.0.0
|
||||
*.control_files=(+DATA/HUTTPRD/control01.ctl,+DATA/HUTTPRD/control02.ctl)
|
||||
*.db_create_file_dest=+DATA
|
||||
*.db_create_online_log_dest_1=+DATA
|
||||
*.db_recovery_file_dest_size=4G
|
||||
*.db_recovery_file_dest=+RECO
|
||||
*.log_archive_dest_1='location=USE_DB_RECOVERY_FILE_DEST'
|
||||
*.log_archive_format=%t_%s_%r.arc
|
||||
*.db_block_size=8192
|
||||
*.open_cursors=300
|
||||
*.diagnostic_dest=/app/base/admin/HUTTPRD
|
||||
*.sga_max_size=3G
|
||||
*.sga_target=3G
|
||||
*.pga_aggregate_target=512M
|
||||
*.pga_aggregate_limit=2G
|
||||
*.processes=350
|
||||
*.audit_file_dest=/app/base/admin/HUTTPRD/adump
|
||||
*.audit_trail=db
|
||||
*.remote_login_passwordfile=exclusive
|
||||
HUTT1.instance_number=1
|
||||
HUTT2.instance_number=2
|
||||
HUTT1.thread=1
|
||||
HUTT2.thread=2
|
||||
HUTT1.undo_tablespace=UNDOTBS1
|
||||
HUTT2.undo_tablespace=UNDOTBS2
|
||||
|
||||
|
||||
~~~~
|
||||
|
||||
startup nomount pfile='/mnt/yavin4/tmp/_oracle_/ad-hoc/initHUTT.ora';
|
||||
|
||||
create database HUTT
|
||||
datafile size 700M autoextend on next 64M
|
||||
extent management local
|
||||
SYSAUX datafile size 512M autoextend on next 64M
|
||||
default temporary tablespace TEMP tempfile size 256M autoextend off
|
||||
undo tablespace UNDOTBS1 datafile size 256M autoextend off
|
||||
character set AL32UTF8
|
||||
national character set AL16UTF16
|
||||
logfile group 1 size 64M,
|
||||
group 2 size 64M
|
||||
user SYS identified by secret user SYSTEM identified by secret;
|
||||
|
||||
create undo tablespace UNDOTBS2 datafile size 256M autoextend off;
|
||||
create tablespace USERS datafile size 32M autoextend ON next 32M;
|
||||
alter database default tablespace USERS;
|
||||
|
||||
|
||||
alter database add logfile thread 2
|
||||
group 3 size 64M,
|
||||
group 4 size 64M;
|
||||
|
||||
alter database enable public thread 2;
|
||||
|
||||
~~ create dictionary objects on CDB$ROOT
|
||||
@?/rdbms/admin/catalog.sql
|
||||
@?/rdbms/admin/catproc.sql
|
||||
@?/rdbms/admin/catclust.sql
|
||||
@?/rdbms/admin/utlrp.sql
|
||||
|
||||
~~ open PDB$SEED in read/write mode and create dictionary objects on PDB$SEED
|
||||
alter session set "_oracle_script"=true;
|
||||
alter pluggable database PDB$SEED close immediate;
|
||||
alter pluggable database PDB$SEED open;
|
||||
alter session set "_oracle_script"=false;
|
||||
alter session set container=PDB$SEED;
|
||||
@?/rdbms/admin/catalog.sql
|
||||
@?/rdbms/admin/catproc.sql
|
||||
@?/rdbms/admin/catclust.sql
|
||||
@?/rdbms/admin/utlrp.sql
|
||||
alter session set "_oracle_script"=true;
|
||||
alter pluggable database PDB$SEED close immediate;
|
||||
alter pluggable database PDB$SEED open read only;
|
||||
alter session set "_oracle_script"=false;
|
||||
|
||||
|
||||
~~ add cluster_database=true in init.ora and restart instance on 2 nodes
|
||||
startup pfile='/mnt/yavin4/tmp/_oracle_/ad-hoc/initHUTT.ora';
|
||||
|
||||
~~ create spfile on ASM and create $ORACLE_HOME/dbs/initXXXXX.ora on both nodes
|
||||
create spfile='+DATA/HUTTPRD/spfileHUTT.ora' from pfile='/mnt/yavin4/tmp/_oracle_/ad-hoc/initHUTT.ora';
|
||||
echo "spfile='+DATA/HUTTPRD/spfileHUTT.ora'" > $ORACLE_HOME/dbs/initHUTT1.ora
|
||||
echo "spfile='+DATA/HUTTPRD/spfileHUTT.ora'" > $ORACLE_HOME/dbs/initHUTT2.ora
|
||||
|
||||
~~ register DB in CRS
|
||||
srvctl add database -d HUTTPRD -o /app/oracle/product/19 -p '+DATA/HUTTPRD/spfileHUTT.ora'
|
||||
srvctl add instance -d HUTTPRD -i HUTT1 -n vortex-db01
|
||||
srvctl add instance -d HUTTPRD -i HUTT2 -n vortex-db02
|
||||
|
||||
~~ create passwordfile on ASM; if the DB is not yet registered on CRS, you will get a WARNING
|
||||
orapwd FILE='+DATA/HUTTPRD/orapwHUTT' ENTRIES=10 DBUNIQUENAME='HUTTPRD' password=secret00!
|
||||
|
||||
~~ check database config in clusterware
|
||||
srvctl config database -db HUTTPRD
|
||||
|
||||
~~ shutdown instances with SQL*Plus and start database with srvctl
|
||||
srvctl start database -db HUTTPRD
|
||||
srvctl status database -db HUTTPRD -v
|
||||
|
||||
~~ optionally, put database in archivelog mode
|
||||
alter system set cluster_database=false scope=spfile sid='*';
|
||||
alter system set db_recovery_file_dest_size=8G scope=both sid='*';
|
||||
alter system set db_recovery_file_dest='+RECO' scope=both sid='*';
|
||||
alter system set log_archive_dest_1 = 'location=USE_DB_RECOVERY_FILE_DEST' scope=both sid='*';
|
||||
|
||||
srvctl stop database -db HUTTPRD
|
||||
|
||||
startup mount exclusive
|
||||
alter database archivelog;
|
||||
alter database open;
|
||||
|
||||
srvctl stop database -db HUTTPRD
|
||||
srvctl start database -db HUTTPRD
|
||||
|
||||
alter system archive log current;
|
||||
104
tiddlywiki/Create RAC non-CDB database manually.txt
Executable file
104
tiddlywiki/Create RAC non-CDB database manually.txt
Executable file
@@ -0,0 +1,104 @@
|
||||
~~ Context: DBNAME=JABBA, db_unique_name=JABBAPRD, instances JABBA1/JABBA2
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
mkdir -p /app/base/admin/JABBA
|
||||
cd /app/base/admin/JABBA
|
||||
mkdir scripts divers adump init diag
|
||||
|
||||
~~ initJABBA.ora
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
*.db_name=JABBA
|
||||
*.db_unique_name=JABBAPRD
|
||||
*.compatible=12.1.0.2.0
|
||||
*.control_files=(+DATA/JABBAPRD/control01.ctl,+DATA/JABBAPRD/control02.ctl)
|
||||
*.db_create_file_dest=+DATA
|
||||
*.db_create_online_log_dest_1=+DATA
|
||||
*.db_recovery_file_dest_size=4G
|
||||
*.db_recovery_file_dest=+RECO
|
||||
*.log_archive_dest_1='location=USE_DB_RECOVERY_FILE_DEST'
|
||||
*.log_archive_format=%t_%s_%r.arc
|
||||
*.db_block_size=8192
|
||||
*.open_cursors=300
|
||||
*.diagnostic_dest=/app/base/admin/JABBA
|
||||
*.sga_max_size=3G
|
||||
*.sga_target=3G
|
||||
*.pga_aggregate_target=512M
|
||||
*.pga_aggregate_limit=1G
|
||||
*.processes=350
|
||||
*.audit_file_dest=/app/base/admin/JABBA/adump
|
||||
*.audit_trail=db
|
||||
*.remote_login_passwordfile=exclusive
|
||||
JABBAPRD1.instance_number=1
|
||||
JABBAPRD2.instance_number=2
|
||||
JABBAPRD1.thread=1
|
||||
JABBAPRD2.thread=2
|
||||
JABBAPRD1.undo_tablespace=UNDOTBS1
|
||||
JABBAPRD2.undo_tablespace=UNDOTBS2
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
startup nomount pfile='/mnt/yavin4/tmp/_oracle_/ad-hoc/initJABBA.ora'
|
||||
|
||||
create database JABBA
|
||||
datafile size 700M autoextend on next 64M
|
||||
extent management local
|
||||
SYSAUX datafile size 512M autoextend on next 64M
|
||||
default temporary tablespace TEMP tempfile size 256M autoextend off
|
||||
undo tablespace UNDOTBS1 datafile size 256M autoextend off
|
||||
character set AL32UTF8
|
||||
national character set AL16UTF16
|
||||
logfile group 1 size 64M,
|
||||
group 2 size 64M
|
||||
user SYS identified by secret user SYSTEM identified by secret;
|
||||
|
||||
create undo tablespace UNDOTBS2 datafile size 256M autoextend off;
|
||||
create tablespace USERS datafile size 32M autoextend ON next 32M;
|
||||
alter database default tablespace USERS;
|
||||
|
||||
@?/rdbms/admin/catalog.sql
|
||||
@?/rdbms/admin/catproc.sql
|
||||
@?/rdbms/admin/catclust.sql
|
||||
@?/rdbms/admin/utlrp.sql
|
||||
|
||||
|
||||
alter database add logfile thread 2
|
||||
group 3 size 64M,
|
||||
group 4 size 64M;
|
||||
|
||||
alter database enable public thread 2;
|
||||
|
||||
|
||||
~~ add cluster_database=true in init.ora and restart instance on 2 nodes
|
||||
startup pfile='/mnt/yavin4/tmp/_oracle_/ad-hoc/initJABBA.ora'
|
||||
|
||||
~~ create spfile on ASM
|
||||
create spfile='+DATA/JABBAPRD/spfileJABBA.ora' from pfile='/mnt/yavin4/tmp/_oracle_/ad-hoc/initJABBA.ora';
|
||||
|
||||
~~ on both nodes, create init.ora under $ORACLE_HOME/dbs
|
||||
echo "spfile='+DATA/JABBAPRD/spfileJABBA.ora'" > $ORACLE_HOME/dbs/initJABBAPRD1.ora
|
||||
echo "spfile='+DATA/JABBAPRD/spfileJABBA.ora'" > $ORACLE_HOME/dbs/initJABBAPRD2.ora
|
||||
|
||||
~~ register DB in CRS
|
||||
srvctl add database -d JABBAPRD -pwfile '+DATA/JABBAPRD/orapwJABBA' -o /app/oracle/product/12.1 -p '+DATA/JABBAPRD/spfileJABBA.ora'
|
||||
|
||||
~~ create passwordfile on ASM; if the DB is not yet registered on CRS, you will get a WARNING
|
||||
orapwd FILE='+DATA/JABBAPRD/orapwJABBA' ENTRIES=10 DBUNIQUENAME='JABBAPRD' password=secret
|
||||
|
||||
srvctl add instance -d JABBAPRD -i JABBAPRD1 -n vortex-db01
|
||||
srvctl add instance -d JABBAPRD -i JABBAPRD2 -n vortex-db02
|
||||
|
||||
~~ shutdown both instances with SQL*Plus, then start DB with srvctl
|
||||
srvctl start database -db JABBAPRD
|
||||
srvctl status database -db JABBAPRD -v
|
||||
|
||||
~~ enable ARCHIVELOG mode
|
||||
alter system set cluster_database=false scope=spfile sid='*';
|
||||
srvctl stop database -db JABBAPRD
|
||||
|
||||
startup mount exclusive
|
||||
alter database archivelog;
|
||||
alter database open;
|
||||
alter system set cluster_database=true scope=spfile sid='*';
|
||||
|
||||
~~ shutdown database within SQL*Plus, then start with srvctl
|
||||
srvctl start database -db JABBAPRD
|
||||
12
tiddlywiki/Cuisson au four.tid
Executable file
12
tiddlywiki/Cuisson au four.tid
Executable file
@@ -0,0 +1,12 @@
|
||||
created: 20210131134915788
|
||||
creator: vplesnila
|
||||
modified: 20210131135040265
|
||||
modifier: vplesnila
|
||||
tags: Divers
|
||||
title: Cuisson au four
|
||||
type: text/plain
|
||||
|
||||
Rôti de porc basse temperature
|
||||
------------------------------
|
||||
Préchauffez le four à 100°C
|
||||
Enfoncez une sonde en son coeur et quand il atteint 68°C, c’est cuit
|
||||
27
tiddlywiki/DBCA example.txt
Executable file
27
tiddlywiki/DBCA example.txt
Executable file
@@ -0,0 +1,27 @@
|
||||
# create database
|
||||
$ORACLE_HOME/bin/dbca \
|
||||
-silent \
|
||||
-createDatabase \
|
||||
-templateName General_Purpose.dbc \
|
||||
-gdbName KITKATPRD \
|
||||
-sid KITKAT \
|
||||
-initParams db_unique_name=KITKATPRD \
|
||||
-characterSet AL32UTF8 \
|
||||
-sysPassword ***** \
|
||||
-systemPassword ***** \
|
||||
-emConfiguration NONE \
|
||||
-createAsContainerDatabase TRUE \
|
||||
-storageType ASM \
|
||||
-diskGroupName DATA \
|
||||
-redoLogFileSize 200 \
|
||||
-sampleSchema FALSE \
|
||||
-totalMemory 4096 \
|
||||
-databaseConfType RAC \
|
||||
-nodelist dbnode1,dbnode2
|
||||
|
||||
# remove database
|
||||
$ORACLE_HOME/bin/dbca \
|
||||
-silent -deleteDatabase \
|
||||
-sourceDB KITKATPRD \
|
||||
-sysDBAUserName sys \
|
||||
-sysDBAPassword *****
|
||||
131
tiddlywiki/DBMS_FILE_TRANSFER examples.md
Executable file
131
tiddlywiki/DBMS_FILE_TRANSFER examples.md
Executable file
@@ -0,0 +1,131 @@
|
||||
On **target** database create a directory and an user for database link:
|
||||
```sql
|
||||
create directory DIR_DEST as '/mnt/yavin4/tmp/_oracle_/dir_dest';
|
||||
create user USER_DBLINK identified by *****;
|
||||
grant create session to USER_DBLINK;
|
||||
grant read,write on directory DIR_DEST to user_dblink;
|
||||
```
|
||||
|
||||
On **source** database create a directory and a database link:
|
||||
```sql
|
||||
create directory DIR_SOURCE as '/mnt/yavin4/tmp/_oracle_/dir_source';
|
||||
create database link REMOTE_DB connect to USER_DBLINK identified by ***** using 'taris/WEDGEPRD';
|
||||
select * from dual@REMOTE_DB;
|
||||
```
|
||||
|
||||
Use `DBMS_FILE_TRANSFER` from the source database to copy a single file from source directory to target directory:
|
||||
```sql
|
||||
BEGIN
|
||||
DBMS_FILE_TRANSFER.put_file(
|
||||
source_directory_object => 'DIR_SOURCE',
|
||||
source_file_name => 'Full_GREEDOPRD_01.dmp',
|
||||
destination_directory_object => 'DIR_DEST',
|
||||
destination_file_name => 'Full_GREEDOPRD_01.dmp',
|
||||
destination_database => 'REMOTE_DB');
|
||||
END;
|
||||
/
|
||||
```
|
||||
|
||||
`DBMS_FILE_TRANSFER` doesn't have a **parallel** option, but we can run parallel transfers using `DBMS_SCHEDULER` jobs:
|
||||
```sql
|
||||
create or replace procedure FILECOPY1 as
|
||||
BEGIN
|
||||
DBMS_FILE_TRANSFER.put_file(
|
||||
source_directory_object => 'DIR_SOURCE',
|
||||
source_file_name => 'Full_GREEDOPRD_01.dmp',
|
||||
destination_directory_object => 'DIR_DEST',
|
||||
destination_file_name => 'Full_GREEDOPRD_01.dmp',
|
||||
destination_database => 'REMOTE_DB');
|
||||
END;
|
||||
/
|
||||
|
||||
create or replace procedure FILECOPY2 as
|
||||
BEGIN
|
||||
DBMS_FILE_TRANSFER.put_file(
|
||||
source_directory_object => 'DIR_SOURCE',
|
||||
source_file_name => 'Full_GREEDOPRD_02.dmp',
|
||||
destination_directory_object => 'DIR_DEST',
|
||||
destination_file_name => 'Full_GREEDOPRD_02.dmp',
|
||||
destination_database => 'REMOTE_DB');
|
||||
END;
|
||||
/
|
||||
|
||||
create or replace procedure FILECOPY3 as
|
||||
BEGIN
|
||||
DBMS_FILE_TRANSFER.put_file(
|
||||
source_directory_object => 'DIR_SOURCE',
|
||||
source_file_name => 'Full_GREEDOPRD_03.dmp',
|
||||
destination_directory_object => 'DIR_DEST',
|
||||
destination_file_name => 'Full_GREEDOPRD_03.dmp',
|
||||
destination_database => 'REMOTE_DB');
|
||||
END;
|
||||
/
|
||||
|
||||
create or replace procedure FILECOPY4 as
|
||||
BEGIN
|
||||
DBMS_FILE_TRANSFER.put_file(
|
||||
source_directory_object => 'DIR_SOURCE',
|
||||
source_file_name => 'Full_GREEDOPRD_04.dmp',
|
||||
destination_directory_object => 'DIR_DEST',
|
||||
destination_file_name => 'Full_GREEDOPRD_04.dmp',
|
||||
destination_database => 'REMOTE_DB');
|
||||
END;
|
||||
/
|
||||
|
||||
begin
|
||||
DBMS_SCHEDULER.create_job
|
||||
(
|
||||
job_name => 'JOB_FILECOPY1',
|
||||
job_type => 'PLSQL_BLOCK',
|
||||
job_action => 'BEGIN FILECOPY1; END;',
|
||||
start_date => sysdate,
|
||||
enabled => TRUE,
|
||||
auto_drop => TRUE,
|
||||
comments => 'one-time job');
|
||||
end;
|
||||
/
|
||||
|
||||
begin
|
||||
DBMS_SCHEDULER.create_job
|
||||
(
|
||||
job_name => 'JOB_FILECOPY2',
|
||||
job_type => 'PLSQL_BLOCK',
|
||||
job_action => 'BEGIN FILECOPY2; END;',
|
||||
start_date => sysdate,
|
||||
enabled => TRUE,
|
||||
auto_drop => TRUE,
|
||||
comments => 'one-time job');
|
||||
end;
|
||||
/
|
||||
|
||||
begin
|
||||
DBMS_SCHEDULER.create_job
|
||||
(
|
||||
job_name => 'JOB_FILECOPY3',
|
||||
job_type => 'PLSQL_BLOCK',
|
||||
job_action => 'BEGIN FILECOPY3; END;',
|
||||
start_date => sysdate,
|
||||
enabled => TRUE,
|
||||
auto_drop => TRUE,
|
||||
comments => 'one-time job');
|
||||
end;
|
||||
/
|
||||
|
||||
begin
|
||||
DBMS_SCHEDULER.create_job
|
||||
(
|
||||
job_name => 'JOB_FILECOPY4',
|
||||
job_type => 'PLSQL_BLOCK',
|
||||
job_action => 'BEGIN FILECOPY4; END;',
|
||||
start_date => sysdate,
|
||||
enabled => TRUE,
|
||||
auto_drop => TRUE,
|
||||
comments => 'one-time job');
|
||||
end;
|
||||
/
|
||||
|
||||
drop procedure FILECOPY1;
|
||||
drop procedure FILECOPY2;
|
||||
drop procedure FILECOPY3;
|
||||
drop procedure FILECOPY4;
|
||||
```
|
||||
14
tiddlywiki/DBMS_METADATA examples.txt
Executable file
14
tiddlywiki/DBMS_METADATA examples.txt
Executable file
@@ -0,0 +1,14 @@
|
||||
-- beautify the output
|
||||
SET LONG 20000 LONGCHUNKSIZE 20000 PAGESIZE 0 LINESIZE 1000 FEEDBACK OFF VERIFY OFF TRIMSPOOL ON
|
||||
|
||||
BEGIN
|
||||
DBMS_METADATA.set_transform_param (DBMS_METADATA.session_transform, 'SQLTERMINATOR', true);
|
||||
DBMS_METADATA.set_transform_param (DBMS_METADATA.session_transform, 'PRETTY', true);
|
||||
END;
|
||||
/
|
||||
|
||||
-- for a TRIGGER
|
||||
SELECT DBMS_METADATA.get_ddl ('TRIGGER', trigger_name, owner)
|
||||
FROM all_triggers
|
||||
WHERE owner = '&OWNER'
|
||||
AND trigger_name = '&TRIGGER_NAME';
|
||||
3
tiddlywiki/DBMS_STATS - examples.txt
Executable file
3
tiddlywiki/DBMS_STATS - examples.txt
Executable file
@@ -0,0 +1,3 @@
|
||||
-- Dictionary and fixed objects (X$) stats
|
||||
execute dbms_stats.gather_dictionary_stats;
|
||||
execute dbms_stats.gather_fixed_objects_stats;
|
||||
6
tiddlywiki/DGMGRL commands.txt
Executable file
6
tiddlywiki/DGMGRL commands.txt
Executable file
@@ -0,0 +1,6 @@
|
||||
-- stop/start MRP on standby
|
||||
edit database 'DRF1DRPEXA' set state='APPLY-OFF';
|
||||
edit database 'DRF1DRPEXA' set state='ONLINE';
|
||||
-- display / set APPLY delay
|
||||
show database 'jabbadrp' delaymins
|
||||
edit database 'jabbadrp' set property delaymins=30;
|
||||
48
tiddlywiki/Data Generator & Swing Bench.md
Executable file
48
tiddlywiki/Data Generator & Swing Bench.md
Executable file
@@ -0,0 +1,48 @@
|
||||
> Author home page: [dominicgiles.com](http://www.dominicgiles.com)
|
||||
>
|
||||
Install JDK
|
||||
```bash
|
||||
dnf install java-1.8.0-openjdk.x86_64
|
||||
```
|
||||
|
||||
Create linux user and directories for Data Generator & Swing Bench
|
||||
```bash
|
||||
groupadd orabench
|
||||
useradd orabench -g orabench -G orabench
|
||||
mkdir -p /app/datagenerator
|
||||
mkdir -p /app/swingbench
|
||||
chown -R orabench:orabench /app/datagenerator /app/swingbench
|
||||
```
|
||||
|
||||
Download and run Data Generator
|
||||
```bash
|
||||
su - orabench
|
||||
wget http://www.dominicgiles.com/swingbench/datageneratorlatest.zip
|
||||
unzip datageneratorlatest.zip
|
||||
rm -rf datageneratorlatest.zip
|
||||
mv datagenerator stable
|
||||
|
||||
export DISPLAY=<your_X_server_IP>:0.0
|
||||
/app/datagenerator/stable/bin/datagenerator
|
||||
```
|
||||
|
||||
Depending of schemas to install, create corresponding schemas/tablespaces
|
||||
```sql
|
||||
create bigfile tablespace SH datafile size 64M autoextend ON next 64M;
|
||||
create user SH identified by SH default tablespace SH;
|
||||
grant connect,resource to SH;
|
||||
|
||||
create bigfile tablespace SOE datafile size 64M autoextend ON next 64M;
|
||||
create user SOE identified by SOE default tablespace SOE;
|
||||
grant connect,resource to SOE;
|
||||
```
|
||||
|
||||
Download and run Swing Bench
|
||||
```bash
|
||||
cd /app/swingbench/
|
||||
wget https://github.com/domgiles/swingbench-public/releases/download/production/swingbenchlatest.zip
|
||||
unzip swingbenchlatest.zip
|
||||
rm -rf swingbenchlatest.zip
|
||||
mv swingbench stable
|
||||
/app/swingbench/stable/bin/swingbench
|
||||
```
|
||||
149
tiddlywiki/Dataguard - sync using incremental backup.md
Executable file
149
tiddlywiki/Dataguard - sync using incremental backup.md
Executable file
@@ -0,0 +1,149 @@
|
||||
# Dataguard configuration
|
||||
|
||||
DGMGRL> show configuration
|
||||
|
||||
Configuration - asty
|
||||
|
||||
Protection Mode: MaxPerformance
|
||||
Members:
|
||||
astyprd - Primary database
|
||||
astydrp - Physical standby database
|
||||
|
||||
|
||||
DGMGRL> show database 'astydrp'
|
||||
|
||||
Database - astydrp
|
||||
|
||||
Role: PHYSICAL STANDBY
|
||||
Intended State: APPLY-ON
|
||||
Transport Lag: 0 seconds (computed 1 second ago)
|
||||
Apply Lag: 0 seconds (computed 1 second ago)
|
||||
Average Apply Rate: 803.00 KByte/s
|
||||
Real Time Query: OFF
|
||||
Instance(s):
|
||||
ASTYDRP
|
||||
|
||||
# Simulate a gap
|
||||
|
||||
Stop the standby database.
|
||||
|
||||
On primary, switch 3-4 times the archivelog on primary and delete all archived logs:
|
||||
|
||||
SQL> alter system archive log current;
|
||||
RMAN> delete noprompt force archivelog all;
|
||||
|
||||
To complicate the situation, add 2 new datafile and create a new tablespace on primary.
|
||||
|
||||
SQL> alter tablespace SYSTEM add datafile size 10M autoextend OFF;
|
||||
SQL> alter tablespace SYSAUX add datafile size 10M autoextend OFF;
|
||||
SQL> create tablespace NAL_HUTTA datafile size 10M autoextend ON next 10M;
|
||||
|
||||
Repeat switch/delete archivelog operation on primary:
|
||||
|
||||
SQL> alter system archive log current;
|
||||
RMAN> delete noprompt force archivelog all;
|
||||
|
||||
Start the standby database in **MOUNT** mode, let it try to resolve the gap, and check the status of the synchronization.
|
||||
On primary:
|
||||
|
||||
alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss';
|
||||
set lines 200
|
||||
|
||||
select THREAD#, max(SEQUENCE#), max(FIRST_TIME),max(NEXT_TIME),max(COMPLETION_TIME)
|
||||
from gv$archived_log group by THREAD#;
|
||||
|
||||
On standby:
|
||||
|
||||
alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss';
|
||||
set lines 200
|
||||
|
||||
select THREAD#, max(SEQUENCE#), max(FIRST_TIME),max(NEXT_TIME),max(COMPLETION_TIME)
|
||||
from gv$archived_log
|
||||
where APPLIED='YES' group by THREAD#;
|
||||
|
||||
|
||||
# Synchronize the standby
|
||||
|
||||
Cancel **MRP** on standby:
|
||||
|
||||
DGMGRL> edit database 'astydrp' set STATE='LOG-APPLY-OFF';
|
||||
|
||||
Try to recover the standby and note down the required `SCN`:
|
||||
|
||||
SQL> recover standby database;
|
||||
|
||||
Normally it should be the same as:
|
||||
|
||||
SQL> select 1+CURRENT_SCN from v$database;
|
||||
|
||||
On primary, identify all datafiles created after this `SCN`; in my example `SCN=5681090`
|
||||
|
||||
SQL> select FILE#,NAME from v$datafile where CREATION_CHANGE# >= 5681090;
|
||||
|
||||
Backup datafiles and generate a new standby controlfile:
|
||||
|
||||
run{
|
||||
set nocfau;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/%d_%U_%s_%t.bck';
|
||||
allocate channel ch02 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/%d_%U_%s_%t.bck';
|
||||
allocate channel ch03 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/%d_%U_%s_%t.bck';
|
||||
allocate channel ch04 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/%d_%U_%s_%t.bck';
|
||||
backup as compressed backupset datafile 17,18,19;
|
||||
release channel ch01;
|
||||
release channel ch02;
|
||||
release channel ch03;
|
||||
release channel ch04;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/temp/%d_%U_%s_%t.ctl';
|
||||
backup current controlfile;
|
||||
release channel ch01;
|
||||
}
|
||||
|
||||
|
||||
Restart the standby in **NOMOUNT** mode and restore the standby controlfile:
|
||||
|
||||
RMAN> restore standby controlfile from '/mnt/yavin4/tmp/_oracle_/orabackup/temp/ASTY_0l1678fs_21_1_1_21_1113825788.ctl';
|
||||
|
||||
Alternatively, you can restore the standby controlfile from the active database:
|
||||
|
||||
RMAN> restore standby controlfile from service ASTYPRD_DGMGRL;
|
||||
|
||||
Mount the standby database:
|
||||
|
||||
RMAN> alter database mount;
|
||||
|
||||
Restore the new datafiles:
|
||||
|
||||
RMAN> restore datafile 17,18,19;
|
||||
|
||||
Catalog recovery area and old standby datafiles:
|
||||
|
||||
RMAN> catalog start with '/data/ASTYDRP' noprompt;
|
||||
RMAN> catalog start with '/fra/ASTYDRP' noprompt;
|
||||
|
||||
At this moment, because of the freshly restored controlfile, Oracle sees the datafiles as datafile copies:
|
||||
|
||||
RMAN> list datafilecopy all;
|
||||
|
||||
Switch database to copy:
|
||||
|
||||
RMAN> switch database to copy;
|
||||
|
||||
|
||||
To recover the standby using a *from SCN* backupset, we can proceed from the active database or use a physical backupset:
|
||||
|
||||
rman auxiliary /
|
||||
run {
|
||||
allocate channel pri1 device type DISK;
|
||||
allocate channel pri2 device type DISK;
|
||||
allocate channel pri3 device type DISK;
|
||||
allocate channel pri4 device type DISK;
|
||||
recover database from service ASTYPRD_DGMGRL using compressed backupset section size 8G;
|
||||
}
|
||||
|
||||
Clear standby redolog:
|
||||
|
||||
SQL> select 'ALTER DATABASE CLEAR LOGFILE GROUP '||GROUP#||';' from v$standby_log;
|
||||
|
||||
Enable **MRP**:
|
||||
|
||||
DGMGRL> edit database 'astydrp' set STATE='ONLINE';
|
||||
239
tiddlywiki/Dataguard 21c standalone creation - example.md
Executable file
239
tiddlywiki/Dataguard 21c standalone creation - example.md
Executable file
@@ -0,0 +1,239 @@
|
||||
Network configuration
|
||||
---------------------
|
||||
|
||||
`/etc/listener.ora` on primary server:
|
||||
|
||||
|
||||
LISTENER_DG =
|
||||
(ADDRESS_LIST=
|
||||
(ADDRESS=(PROTOCOL=tcp)(HOST=taris.swgalaxy)(PORT=1523))
|
||||
)
|
||||
|
||||
SID_LIST_LISTENER_DG =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = ASTYPRD_DGMGRL)
|
||||
(SID_NAME = ASTYPRD)
|
||||
(ORACLE_HOME = /app/oracle/product/21)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
`/etc/listener.ora` on secondary server:
|
||||
|
||||
LISTENER_DG =
|
||||
(ADDRESS_LIST=
|
||||
(ADDRESS=(PROTOCOL=tcp)(HOST=mandalore.swgalaxy)(PORT=1523))
|
||||
)
|
||||
|
||||
SID_LIST_LISTENER_DG =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = ASTYDRP_DGMGRL)
|
||||
(SID_NAME = ASTYDRP)
|
||||
(ORACLE_HOME = /app/oracle/product/21)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
Start `LISTENER_DG` on both servers:
|
||||
|
||||
lsnrctl start LISTENER_DG
|
||||
|
||||
|
||||
`/etc/tnsnames.ora` on both servers:
|
||||
|
||||
ASTYPRD_DGMGRL =
|
||||
(DESCRIPTION =
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = taris.swgalaxy)(PORT = 1523))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = ASTYPRD_DGMGRL)
|
||||
)
|
||||
)
|
||||
|
||||
ASTYDRP_DGMGRL =
|
||||
(DESCRIPTION =
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = mandalore.swgalaxy)(PORT = 1523))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = ASTYDRP_DGMGRL)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
Dataguard initial duplication
|
||||
-----------------------------
|
||||
|
||||
From the primary init.ora, create an init.ora for the secondary database, test it by starting the secondary database in nomount, and create an spfile from this init.ora. Start the secondary database in nomount mode. Also copy the passwordfile from the primary to the secondary server.
|
||||
|
||||
Try cross connections from both primary and secondary servers:
|
||||
|
||||
sqlplus 'sys/"*****"'@ASTYPRD_DGMGRL as sysdba
|
||||
sqlplus 'sys/"*****"'@ASTYDRP_DGMGRL as sysdba
|
||||
|
||||
|
||||
Create standby redolog on primary database using the result of following queries:
|
||||
|
||||
select 'ALTER DATABASE ADD STANDBY LOGFILE THREAD '||thread#||' size '||bytes||';' from v$log;
|
||||
select distinct 'ALTER DATABASE ADD STANDBY LOGFILE THREAD '||thread#||' size '||bytes||';' from v$log;
|
||||
|
||||
If you plan to use backups on standby database, set required RMAN parameters **prior** to duplicate step:
|
||||
|
||||
configure archivelog deletion policy to applied on all standby;
|
||||
configure db_unique_name 'ASTYDRP' connect identifier 'ASTYDRP_DGMGRL';
|
||||
configure db_unique_name 'ASTYPRD' connect identifier 'ASTYPRD_DGMGRL';
|
||||
|
||||
|
||||
Duplicate primary database *for standby*:
|
||||
|
||||
rman target='sys/"*****"'@ASTYPRD_DGMGRL auxiliary='sys/"*****"'@ASTYDRP_DGMGRL
|
||||
|
||||
run
|
||||
{
|
||||
allocate channel pri01 device type disk;
|
||||
allocate channel pri02 device type disk;
|
||||
allocate channel pri03 device type disk;
|
||||
allocate channel pri04 device type disk;
|
||||
allocate channel pri05 device type disk;
|
||||
allocate channel pri06 device type disk;
|
||||
allocate channel pri07 device type disk;
|
||||
allocate channel pri08 device type disk;
|
||||
allocate channel pri09 device type disk;
|
||||
allocate channel pri10 device type disk;
|
||||
|
||||
allocate auxiliary channel aux01 device type disk;
|
||||
allocate auxiliary channel aux02 device type disk;
|
||||
allocate auxiliary channel aux03 device type disk;
|
||||
allocate auxiliary channel aux04 device type disk;
|
||||
allocate auxiliary channel aux05 device type disk;
|
||||
allocate auxiliary channel aux06 device type disk;
|
||||
allocate auxiliary channel aux07 device type disk;
|
||||
allocate auxiliary channel aux08 device type disk;
|
||||
allocate auxiliary channel aux09 device type disk;
|
||||
allocate auxiliary channel aux10 device type disk;
|
||||
|
||||
duplicate database 'ASTY' for standby
|
||||
from active database using compressed backupset section size 512M;
|
||||
}
|
||||
|
||||
|
||||
It is not mandatory but recommended to activate flashback on both databases (leaving for example the default retention target of 1 day):
|
||||
|
||||
alter database flashback ON;
|
||||
|
||||
|
||||
Dataguard broker configuration
|
||||
------------------------------
|
||||
|
||||
On primary database:
|
||||
|
||||
alter system set dg_broker_config_file1='/app/oracle/base/admin/ASTYPRD/dgmgrl/dr1ASTYPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='/app/oracle/base/admin/ASTYPRD/dgmgrl/dr2ASTYPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
On secondary database:
|
||||
|
||||
alter system set dg_broker_config_file1='/app/oracle/base/admin/ASTYDRP/dgmgrl/dr1ASTYDRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='/app/oracle/base/admin/ASTYDRP/dgmgrl/dr2ASTYDRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
On primary or secondary server:
|
||||
|
||||
dgmgrl
|
||||
connect sys/*****@ASTYPRD_DGMGRL
|
||||
|
||||
create configuration ASTY as
|
||||
primary database is ASTYPRD
|
||||
connect identifier is ASTYPRD_DGMGRL;
|
||||
|
||||
add database ASTYDRP
|
||||
as connect identifier is ASTYDRP_DGMGRL
|
||||
maintained as physical;
|
||||
|
||||
enable configuration;
|
||||
|
||||
edit database 'astyprd' set property ArchiveLagTarget=0;
|
||||
edit database 'astyprd' set property LogArchiveMaxProcesses=2;
|
||||
edit database 'astyprd' set property LogArchiveMinSucceedDest=1;
|
||||
edit database 'astyprd' set property StandbyFileManagement='AUTO';
|
||||
|
||||
edit database 'astydrp' set property ArchiveLagTarget=0;
|
||||
edit database 'astydrp' set property LogArchiveMaxProcesses=2;
|
||||
edit database 'astydrp' set property LogArchiveMinSucceedDest=1;
|
||||
edit database 'astydrp' set property StandbyFileManagement='AUTO';
|
||||
|
||||
edit instance 'ASTYPRD' set property 'StaticConnectIdentifier'='ASTYPRD_DGMGRL';
|
||||
edit instance 'ASTYDRP' set property 'StaticConnectIdentifier'='ASTYDRP_DGMGRL';
|
||||
|
||||
edit instance 'ASTYPRD' set property 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=taris.swgalaxy)(PORT=1523))(CONNECT_DATA=(SERVICE_NAME=ASTYPRD_DGMGRL)(INSTANCE_NAME=ASTYPRD)(SERVER=DEDICATED)))';
|
||||
edit instance 'ASTYDRP' set property 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=mandalore.swgalaxy)(PORT=1523))(CONNECT_DATA=(SERVICE_NAME=ASTYDRP_DGMGRL)(INSTANCE_NAME=ASTYDRP)(SERVER=DEDICATED)))';
|
||||
|
||||
|
||||
Wait a couple of minutes (optionally after archiving the current log on the primary database), then run:
|
||||
|
||||
show configuration
|
||||
show database 'astyprd'
|
||||
show database 'astydrp'
|
||||
|
||||
validate database 'astyprd'
|
||||
validate database 'astydrp'
|
||||
|
||||
|
||||
To disable/enable redo apply on standby database:
|
||||
|
||||
edit database 'astydrp' set state='APPLY-OFF';
|
||||
edit database 'astydrp' set state='ONLINE';
|
||||
|
||||
|
||||
|
||||
Backup primary and standby databases
|
||||
------------------------------------
|
||||
|
||||
Backup primary database:
|
||||
|
||||
rman target /
|
||||
|
||||
run
|
||||
{
|
||||
set nocfau;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/ASTYPRD/%d_%U_%s_%t.bck';
|
||||
allocate channel ch02 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/ASTYPRD/%d_%U_%s_%t.bck';
|
||||
allocate channel ch03 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/ASTYPRD/%d_%U_%s_%t.bck';
|
||||
allocate channel ch04 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/ASTYPRD/%d_%U_%s_%t.bck';
|
||||
backup as compressed backupset incremental level 0 database section size 2G include current controlfile plus archivelog delete input;
|
||||
release channel ch01;
|
||||
release channel ch02;
|
||||
release channel ch03;
|
||||
release channel ch04;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/ASTYPRD/%d_%U_%s_%t.controlfile';
|
||||
backup current controlfile;
|
||||
release channel ch01;
|
||||
}
|
||||
|
||||
|
||||
Backup standby database:
|
||||
|
||||
rman target='"sys/*****"'
|
||||
|
||||
run
|
||||
{
|
||||
set nocfau;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/ASTYDRP/%d_%U_%s_%t.bck';
|
||||
allocate channel ch02 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/ASTYDRP/%d_%U_%s_%t.bck';
|
||||
allocate channel ch03 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/ASTYDRP/%d_%U_%s_%t.bck';
|
||||
allocate channel ch04 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/ASTYDRP/%d_%U_%s_%t.bck';
|
||||
backup as compressed backupset incremental level 0 database section size 2G include current controlfile plus archivelog delete input;
|
||||
release channel ch01;
|
||||
release channel ch02;
|
||||
release channel ch03;
|
||||
release channel ch04;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/ASTYDRP/%d_%U_%s_%t.controlfile';
|
||||
backup current controlfile;
|
||||
release channel ch01;
|
||||
}
|
||||
|
||||
243
tiddlywiki/Dataguard CDB 12.1 exemple.txt
Executable file
243
tiddlywiki/Dataguard CDB 12.1 exemple.txt
Executable file
@@ -0,0 +1,243 @@
|
||||
~~ creation of CDB database
|
||||
|
||||
$ORACLE_HOME/bin/dbca \
|
||||
-silent \
|
||||
-createDatabase \
|
||||
-templateName General_Purpose.dbc \
|
||||
-gdbName EWOK \
|
||||
-sid EWOKPRD \
|
||||
-initParams db_unique_name=EWOKPRD \
|
||||
-characterSet AL32UTF8 \
|
||||
-sysPassword secret \
|
||||
-systemPassword secret \
|
||||
-emConfiguration NONE \
|
||||
-createAsContainerDatabase TRUE \
|
||||
-storageType ASM \
|
||||
-diskGroupName DATA \
|
||||
-redoLogFileSize 100 \
|
||||
-sampleSchema FALSE \
|
||||
-totalMemory 2048 \
|
||||
-databaseConfType RAC \
|
||||
-nodelist vortex-db01,vortex-db02
|
||||
|
||||
|
||||
~~ identify the spfile and passwordfile ASM location and more readable aliases
|
||||
srvctl config database -d EWOKPRD
|
||||
|
||||
ASMCMD [+] > cd +DATA/EWOKPRD/
|
||||
ASMCMD [+DATA/EWOKPRD] > mkalias +DATA/EWOKPRD/PARAMETERFILE/spfile.333.957718565 spfileewokprd.ora
|
||||
ASMCMD [+DATA/EWOKPRD] > mkalias +DATA/EWOKPRD/PASSWORD/pwdewokprd.308.957717627 orapwewokprd
|
||||
|
||||
~~ update spfile location in the CRS
|
||||
srvctl modify database -db EWOKPRD -spfile +DATA/EWOKPRD/spfileewokprd.ora
|
||||
srvctl modify database -db EWOKPRD -pwfile +DATA/EWOKPRD/orapwewokprd
|
||||
srvctl stop database -d EWOKPRD
|
||||
srvctl start database -d EWOKPRD
|
||||
srvctl status database -d EWOKPRD -v
|
||||
|
||||
|
||||
~~ enable ARCHIVELOG mode and FORCE LOGGING on the PRIMARY database
|
||||
|
||||
alter system set db_recovery_file_dest_size = 4G scope=both sid='*';
|
||||
alter system set db_recovery_file_dest = '+RECO' scope=both sid='*';
|
||||
alter system set log_archive_dest_1 = 'location=USE_DB_RECOVERY_FILE_DEST' scope=both sid='*';
|
||||
|
||||
srvctl stop database -d EWOKPRD
|
||||
|
||||
startup mount exclusive
|
||||
alter database archivelog;
|
||||
alter database open;
|
||||
alter database force logging;
|
||||
|
||||
srvctl stop database -d EWOKPRD
|
||||
srvctl start database -d EWOKPRD
|
||||
|
||||
alter system archive log current;
|
||||
|
||||
~~ copy pfile and passwordfile from the primary cluster to the first node of the standby cluster
|
||||
|
||||
SQL> create pfile='/tmp/pfile_EWOK.ora' from spfile;
|
||||
asmcmd cp +DATA/EWOKPRD/orapwewokprd /tmp
|
||||
cd /tmp
|
||||
scp orapwewokprd pfile_EWOK.ora kessel-db01:/tmp
|
||||
|
||||
~~ make adjustements in pfile and put all in $ORACLE_HOME/dbs
|
||||
|
||||
SQL> create spfile from pfile='/tmp/standby.ora';
|
||||
cp orapwewokprd $ORACLE_HOME/dbs/orapwEWOKDRP1
|
||||
|
||||
SQL> startup nomount
|
||||
|
||||
~~ NETWORK configuration - listeners
|
||||
~~ in my configuration I have a dedicated listener for DATAGUARD; the following definitions have been added on the primary cluster:
|
||||
|
||||
# For DATAGUARD...
|
||||
SID_LIST_LISTENER_DG =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = EWOKPRD_DGMGRL)
|
||||
(SID_NAME = EWOKPRD1)
|
||||
(ORACLE_HOME = /app/oracle/product/12.1/db_1)
|
||||
)
|
||||
)
|
||||
|
||||
# ...For DATAGUARD
|
||||
|
||||
~~ and on standby cluster:
|
||||
|
||||
# For DATAGUARD...
|
||||
SID_LIST_LISTENER_DG =
|
||||
(SID_LIST =
|
||||
(SID_DESC =
|
||||
(GLOBAL_DBNAME = EWOKDRP_DGMGRL)
|
||||
(SID_NAME = EWOKDRP1)
|
||||
(ORACLE_HOME = /app/oracle/product/12.1/db_1)
|
||||
)
|
||||
)
|
||||
# ...For DATAGUARD
|
||||
|
||||
|
||||
~~ cross connection tests; we should be able to connect to idle instances too
|
||||
sqlplus /nolog
|
||||
connect sys/secret@vortex-db01-dba-vip:1541/EWOKPRD_DGMGRL as sysdba
|
||||
connect sys/secret@vortex-db02-dba-vip:1541/EWOKPRD_DGMGRL as sysdba
|
||||
connect sys/secret@kessel-db01-dba-vip:1541/EWOKDRP_DGMGRL as sysdba
|
||||
(for the moment the standby pfile/passwordfile are not deployed on second node of the standby cluster)
|
||||
|
||||
~~ aliases to add on tnsnames.ora on all primary/standby database nodes
|
||||
# For DATAGUARD...
|
||||
EWOKPRD_DG =
|
||||
(DESCRIPTION =
|
||||
(FAILOVER = YES)
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = vortex-db01-dba-vip)(PORT = 1541))
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = vortex-db02-dba-vip)(PORT = 1541))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = EWOKPRD_DGMGRL)
|
||||
)
|
||||
)
|
||||
|
||||
EWOKDRP_DG =
|
||||
(DESCRIPTION =
|
||||
(FAILOVER = YES)
|
||||
(ADDRESS_LIST =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = kessel-db01-dba-vip)(PORT = 1541))
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = kessel-db02-dba-vip)(PORT = 1541))
|
||||
)
|
||||
(CONNECT_DATA =
|
||||
(SERVER = DEDICATED)
|
||||
(SERVICE_NAME = EWOKDRP_DGMGRL)
|
||||
)
|
||||
)
|
||||
# ...For DATAGUARD
|
||||
|
||||
|
||||
~~ cross connection test using TNS aliases; we should be able to connect to idle instances
|
||||
|
||||
sqlplus /nolog
|
||||
connect sys/secret@EWOKPRD_DG as sysdba
|
||||
connect sys/secret@EWOKDRP_DG as sysdba
|
||||
|
||||
|
||||
~~ from the spfile of the primary DB we create an spfile for the secondary DB and we start the secondary DB in nomount
|
||||
rman target sys/secret@EWOKPRD_DG auxiliary sys/secret@EWOKDRP_DG
|
||||
run {
|
||||
allocate channel pri1 device type DISK;
|
||||
allocate channel pri2 device type DISK;
|
||||
allocate auxiliary channel aux1 device type DISK;
|
||||
allocate auxiliary channel aux2 device type DISK;
|
||||
duplicate target database
|
||||
for standby
|
||||
from active database
|
||||
nofilenamecheck
|
||||
using compressed backupset section size 1G;
|
||||
}
|
||||
|
||||
|
||||
~~ Dataguard Broker configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
~~ on primary database
|
||||
alter system set dg_broker_start=FALSE scope=both sid='*';
|
||||
alter system set dg_broker_config_file1='+DATA/EWOKPRD/dr1EWOKPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='+DATA/EWOKPRD/dr2EWOKPRD.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
~~ on secondary database
|
||||
alter system set dg_broker_start=FALSE scope=both sid='*';
|
||||
alter system set dg_broker_config_file1='+DATA/EWOKDRP/dr1EWOKDRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_config_file2='+DATA/EWOKDRP/dr2EWOKDRP.dat' scope=both sid='*';
|
||||
alter system set dg_broker_start=TRUE scope=both sid='*';
|
||||
|
||||
~~ creation of STANDBY REDO LOGS on both databases
|
||||
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 1 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 1 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 1 size 100M;
|
||||
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 2 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 2 size 100M;
|
||||
ALTER DATABASE ADD STANDBY LOGFILE thread 2 size 100M;
|
||||
|
||||
|
||||
select GROUP#,THREAD#,STATUS, BYTES from v$standby_log;
|
||||
|
||||
col MEMBER for a60
|
||||
select * from v$logfile;
|
||||
|
||||
|
||||
~~ create DGMGRL configuration
|
||||
dgmgrl
|
||||
DGMGRL> connect sys/secret@EWOKPRD_DG
|
||||
DGMGRL> create configuration EWOK as
|
||||
primary database is EWOKPRD
|
||||
connect identifier is EWOKPRD_DG;
|
||||
DGMGRL> add database EWOKDRP
|
||||
as connect identifier is EWOKDRP_DG
|
||||
maintained as physical;
|
||||
|
||||
DGMGRL> edit database 'ewokdrp' set property ArchiveLagTarget=0;
|
||||
DGMGRL> edit database 'ewokdrp' set property LogArchiveMaxProcesses=2;
|
||||
DGMGRL> edit database 'ewokdrp' set property LogArchiveMinSucceedDest=1;
|
||||
DGMGRL> edit database 'ewokdrp' set property StandbyFileManagement='AUTO';
|
||||
DGMGRL> edit database 'ewokdrp' set property TransportDisconnectedThreshold='0';
|
||||
|
||||
DGMGRL> edit database 'ewokprd' set property ArchiveLagTarget=0;
|
||||
DGMGRL> edit database 'ewokprd' set property LogArchiveMaxProcesses=2;
|
||||
DGMGRL> edit database 'ewokprd' set property LogArchiveMinSucceedDest=1;
|
||||
DGMGRL> edit database 'ewokprd' set property StandbyFileManagement='AUTO';
|
||||
DGMGRL> edit database 'ewokprd' set property TransportDisconnectedThreshold='0';
|
||||
|
||||
DGMGRL> enable configuration;
|
||||
DGMGRL> show configuration;
|
||||
|
||||
~~ VERY IMPORTANT
|
||||
~~ set StaticConnectIdentifier for all PRIMARY/DATAGUARD database instances
|
||||
~~ use complete DESCRIPTION syntax to uniquely identify the instances of each node
|
||||
|
||||
EDIT INSTANCE 'EWOKPRD1' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=vortex-db01-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=EWOKPRD_DGMGRL)(INSTANCE_NAME=EWOKPRD1)(SERVER=DEDICATED)))';
|
||||
EDIT INSTANCE 'EWOKPRD2' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=vortex-db02-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=EWOKPRD_DGMGRL)(INSTANCE_NAME=EWOKPRD2)(SERVER=DEDICATED)))';
|
||||
EDIT INSTANCE 'EWOKDRP1' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=kessel-db01-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=EWOKDRP_DGMGRL)(INSTANCE_NAME=EWOKDRP1)(SERVER=DEDICATED)))';
|
||||
EDIT INSTANCE 'EWOKDRP2' SET PROPERTY 'StaticConnectIdentifier'='(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=kessel-db02-dba-vip)(PORT=1541))(CONNECT_DATA=(SERVICE_NAME=EWOKDRP_DGMGRL)(INSTANCE_NAME=EWOKDRP2)(SERVER=DEDICATED)))';
|
||||
|
||||
|
||||
~~ move spfile from file system to ASM
|
||||
create pfile='/tmp/pfile_EWOKDRP.ora' from spfile;
|
||||
create spfile ='+DATA/ewokdrp/spfileEWOKDRP.ora' from pfile='/tmp/pfile_EWOKDRP.ora';
|
||||
|
||||
~~ register standby database in the CRS
|
||||
srvctl add database -d EWOKDRP -o /app/oracle/product/12.1/db_1 -c RAC -p '+DATA/EWOKDRP/spfileEWOKDRP.ora' -r physical_standby -n EWOK
|
||||
|
||||
~~ pay attention to -s <start_options>; the default value is OPEN, that means that your DATAGUARD will be OPENED (active DATAGUARD)
|
||||
srvctl add instance -d EWOKDRP -i EWOKDRP1 -n kessel-db01
|
||||
srvctl add instance -d EWOKDRP -i EWOKDRP2 -n kessel-db02
|
||||
|
||||
srvctl start database -d EWOKDRP -o mount
|
||||
srvctl status database -d EWOKDRP -v
|
||||
|
||||
~~ finally, move passwordfile to ASM using pwcopy under asmcmd
|
||||
asmcmd pwcopy +DATA/EWOKPRD/orapwewokprd /tmp/orapwewokprd
|
||||
scp /tmp/orapwewokprd kessel-db01:/tmp/orapwewokprd
|
||||
asmcmd pwcopy /tmp/orapwewokprd +DATA/EWOKDRP/orapwewokdrp
|
||||
11
tiddlywiki/Dataguard archivelog apply check.txt
Executable file
11
tiddlywiki/Dataguard archivelog apply check.txt
Executable file
@@ -0,0 +1,11 @@
|
||||
alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss';
|
||||
set lines 200
|
||||
|
||||
-- on PRIMARY database
|
||||
----------------------
|
||||
select THREAD#, max(SEQUENCE#), max(FIRST_TIME),max(NEXT_TIME),max(COMPLETION_TIME) from gv$archived_log group by THREAD#;
|
||||
|
||||
-- on STANDBY database
|
||||
----------------------
|
||||
select THREAD#, max(SEQUENCE#), max(FIRST_TIME),max(NEXT_TIME),max(COMPLETION_TIME) from gv$archived_log
|
||||
where APPLIED='YES' group by THREAD#;
|
||||
9
tiddlywiki/Divers.tid
Executable file
9
tiddlywiki/Divers.tid
Executable file
@@ -0,0 +1,9 @@
|
||||
color: #ff80ff
|
||||
created: 20191026073349424
|
||||
creator: vplesnila
|
||||
modified: 20200203165611842
|
||||
modifier: vplesnila
|
||||
tags: Contents
|
||||
title: Divers
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
49
tiddlywiki/Docker notes.md
Executable file
49
tiddlywiki/Docker notes.md
Executable file
@@ -0,0 +1,49 @@
|
||||
Install Docker
|
||||
--------------
|
||||
|
||||
dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo
|
||||
dnf install -y docker-ce --nobest
|
||||
|
||||
systemctl enable --now docker
|
||||
systemctl status docker
|
||||
|
||||
|
||||
Docker is installed by default under `/var/lib/docker`. To move it to another file system, for example `/app/docker`:
|
||||
|
||||
systemctl stop docker
|
||||
|
||||
cd /var/lib/
|
||||
mv docker /app/
|
||||
ln -s /app/docker .
|
||||
|
||||
systemctl start docker
|
||||
systemctl status docker
|
||||
|
||||
Useful commands
|
||||
----------------
|
||||
|
||||
Alias to stop and remove all containers:
|
||||
|
||||
alias cclean='docker stop $(docker ps -a -q); docker rm $(docker ps -a -q)'
|
||||
|
||||
Truncate all container logs:
|
||||
|
||||
truncate -s 0 $(docker inspect --format='{{.LogPath}}' $(docker ps -a -q))
|
||||
|
||||
Save an image:
|
||||
|
||||
docker save sabnzbd/sabnzbd | pigz > /mnt/yavin4/tmp/sabnzbd.tar.gz
|
||||
|
||||
Load an image:
|
||||
|
||||
gunzip -c /mnt/yavin4/tmp/sabnzbd.tar.gz | docker load
|
||||
|
||||
Set auto start for all host containers:
|
||||
|
||||
docker update --restart unless-stopped $(docker ps -q)
|
||||
|
||||
Run command in bash:
|
||||
|
||||
docker exec -it <container> bash
|
||||
|
||||
|
||||
5
tiddlywiki/Draft of 'New Tiddler' by vplesnila.tid
Executable file
5
tiddlywiki/Draft of 'New Tiddler' by vplesnila.tid
Executable file
@@ -0,0 +1,5 @@
|
||||
created: 20200225085307862
|
||||
modified: 20200225085307862
|
||||
modifier: vplesnila
|
||||
title: Draft of 'New Tiddler' by vplesnila
|
||||
type: text/vnd.tiddlywiki
|
||||
4
tiddlywiki/Draft of 'ssh - ProxyJump' by vplesnila.tid
Executable file
4
tiddlywiki/Draft of 'ssh - ProxyJump' by vplesnila.tid
Executable file
@@ -0,0 +1,4 @@
|
||||
modified: 20220703081209354
|
||||
modifier: vplesnila
|
||||
title: Draft of 'ssh - ProxyJump' by vplesnila
|
||||
type: text/vnd.tiddlywiki
|
||||
8
tiddlywiki/Draft.tid
Executable file
8
tiddlywiki/Draft.tid
Executable file
@@ -0,0 +1,8 @@
|
||||
created: 20190628135534599
|
||||
creator: vplesnila
|
||||
modified: 20190628135556880
|
||||
modifier: vplesnila
|
||||
tags: Contents
|
||||
title: Draft
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
8
tiddlywiki/English.tid
Executable file
8
tiddlywiki/English.tid
Executable file
@@ -0,0 +1,8 @@
|
||||
color: #ff8080
|
||||
created: 20191107141106371
|
||||
creator: vplesnila
|
||||
modified: 20191107143338459
|
||||
modifier: vplesnila
|
||||
tags: Divers
|
||||
title: English
|
||||
type: text/vnd.tiddlywiki
|
||||
29
tiddlywiki/Enterprise Manager Database Express setup.md
Executable file
29
tiddlywiki/Enterprise Manager Database Express setup.md
Executable file
@@ -0,0 +1,29 @@
|
||||
Setup for CDB with all PDB on the same port:
|
||||
|
||||
select dbms_xdb_config.gethttpsport() from dual;
|
||||
exec dbms_xdb_config.sethttpsport(5500);
|
||||
exec dbms_xdb_config.sethttpport(5511);
|
||||
alter system set dispatchers='(PROTOCOL=TCP)(SERVICE=THANASPRD)';
|
||||
exec dbms_xdb_config.setglobalportenabled(TRUE);
|
||||
alter system register;
|
||||
|
||||
|
||||
Access URL: https://192.168.0.64:5500/em
|
||||
Access URL: http://192.168.0.64:5511/em
|
||||
|
||||
|
||||
Setup for CDB with each PDB on different port:
|
||||
|
||||
On CDB$ROOT:
|
||||
|
||||
exec dbms_xdb_config.setglobalportenabled(FALSE);
|
||||
|
||||
On PDB:
|
||||
|
||||
alter session set container=NEREUS;
|
||||
select dbms_xdb_config.gethttpsport() from dual;
|
||||
exec dbms_xdb_config.sethttpsport(5555);
|
||||
alter system register;
|
||||
|
||||
|
||||
Access URL: https://192.168.0.64:5555/em
|
||||
8
tiddlywiki/Environement_variable_of_a_running_process.txt
Executable file
8
tiddlywiki/Environement_variable_of_a_running_process.txt
Executable file
@@ -0,0 +1,8 @@
|
||||
AIX:
|
||||
ps eww <pid>
|
||||
|
||||
SOLARIS:
|
||||
pargs -e <pid>
|
||||
|
||||
LINUX:
|
||||
cat /proc/<pid>/environ | tr '\0' '\n'
|
||||
29
tiddlywiki/Failover example.txt
Executable file
29
tiddlywiki/Failover example.txt
Executable file
@@ -0,0 +1,29 @@
|
||||
### Check that the max sequence is OK on both the dataguard and cascade databases, and that both are in sync
|
||||
|
||||
## normally this part is ok ,Check the protection mode and role in standby cascade database it should be maximum performance, if not change to maximum performance. Once mode is fine
|
||||
SELECT OPEN_MODE,PROTECTION_MODE,DATABASE_ROLE FROM V$DATABASE; OPEN_MODE PROTECTION_MODE DATABASE_ROLE
|
||||
NOTE: If protection_mode is other than maximum performance, then alter it as below.
|
||||
SQL> ALTER DATABASE SET STANDBY DATABASE TO MAXIMIZE PERFORMANCE;
|
||||
|
||||
##Stop recovery on both dataguard and cascades
|
||||
recover managed standby database cancel;
|
||||
####Now Action on cascades only :
|
||||
###Now activate standby
|
||||
alter database recover managed standby database finish;
|
||||
alter database activate standby database;
|
||||
##Check the role and mode of the database cascade, it should be primary now
|
||||
select name,open_mode,database_role from v$database;
|
||||
#### open database and bounce , recovery should be stopped to avoid active dataguard flag .
|
||||
Alter database open;
|
||||
Bounce your database and verify database name its open mode and its role.
|
||||
select database_role from v$database;
|
||||
|
||||
###Change dbname with nid utility
|
||||
Step:1 Mount the database with old db name(standby)
|
||||
Step:2 Run the nid utility (syntax: nid sys/password@CURRENT_DBNAME DBNAME=NEW_DBNAME)
|
||||
Step:3 Once you run the nid utility the name will be changed to new db name.
|
||||
Step:4 Then you have to change the db_name in the parameter file. Using alter system command and start db in nomount to check ok .
|
||||
Step:5 change the spfile to a new db name. Check spfile name correct with new dbname.
|
||||
Step:6 Now open the database with reset logs option.
|
||||
Step7: register database information for listener alter system register.
|
||||
###And check connection using sqlplus from client .
|
||||
4
tiddlywiki/Flask tutorials.txt
Executable file
4
tiddlywiki/Flask tutorials.txt
Executable file
@@ -0,0 +1,4 @@
|
||||
https://www.fullstackpython.com/flask.html
|
||||
|
||||
https://testdriven.io/blog/developing-a-single-page-app-with-flask-and-vuejs/#conclusion
|
||||
|
||||
9
tiddlywiki/Generate Rebuild Index commands.txt
Executable file
9
tiddlywiki/Generate Rebuild Index commands.txt
Executable file
@@ -0,0 +1,9 @@
|
||||
set lines 256 pages 0
|
||||
|
||||
select 'alter index "'|| owner || '"."' || index_name || '" rebuild online compute statistics;'
|
||||
from
|
||||
dba_indexes
|
||||
where
|
||||
owner='DRIVE' and
|
||||
table_name in ('FUEL_INTERIM_TRANS_HEADERS', 'FUEL_INTERIM_TRANS_DETAILS', 'FUEL_TRANSACTION_ERRORS');
|
||||
|
||||
54
tiddlywiki/HAProxy - configuration example with HTTP_HTTPS_SSH_VPN.txt
Executable file
54
tiddlywiki/HAProxy - configuration example with HTTP_HTTPS_SSH_VPN.txt
Executable file
@@ -0,0 +1,54 @@
|
||||
-- host IP address is 192.168.0.8
|
||||
-- Apache uses ports 9080/9443
|
||||
-- all HTTP requests on 80 will be redirected to 9080 except flower.databasepro.fr which will go on 192.168.0.82:80
|
||||
-- incoming HTTP 443 requests will be redirected to 9443 except flower.databasepro.fr which will go on 192.168.0.82:443
|
||||
-- incoming SSH requests on 443 port will be redirected to the port 22
|
||||
-- incoming OpenVPN requests on 443 port will be redirected to 192.168.0.9:1194
|
||||
|
||||
|
||||
frontend in_80
|
||||
bind 192.168.0.8:80
|
||||
default_backend out_80_default
|
||||
# Define hosts
|
||||
acl host_flower hdr(host) -i flower.databasepro.fr
|
||||
# Figure out which one to use
|
||||
use_backend out_80_flower if host_flower
|
||||
|
||||
backend out_80_default
|
||||
server sv1 192.168.0.8:9080 maxconn 32
|
||||
|
||||
backend out_80_flower
|
||||
server sv1 192.168.0.82:80 maxconn 32
|
||||
|
||||
|
||||
frontend in_443
|
||||
bind 192.168.0.8:443
|
||||
mode tcp
|
||||
option tcplog
|
||||
tcp-request inspect-delay 5s
|
||||
tcp-request content accept if HTTP
|
||||
# Define hosts
|
||||
acl host_flower hdr(host) -i flower.databasepro.fr
|
||||
# Figure out which one to use
|
||||
use_backend out_443_flower if { req_ssl_sni -i flower.databasepro.fr }
|
||||
use_backend out_443_https if { req.ssl_hello_type 1 }
|
||||
use_backend out_ssh if { payload(0,7) -m bin 5353482d322e30 }
|
||||
default_backend openvpn
|
||||
|
||||
backend out_443_flower
|
||||
server sv1 192.168.0.82:443
|
||||
mode tcp
|
||||
|
||||
|
||||
backend out_443_https
|
||||
server sv1 192.168.0.8:9443
|
||||
mode tcp
|
||||
|
||||
backend openvpn
|
||||
mode tcp
|
||||
server openvpn-server 192.168.0.9:1194
|
||||
|
||||
backend out_ssh
|
||||
mode tcp
|
||||
timeout server 2h
|
||||
server ssh-local 192.168.0.8:22
|
||||
@@ -0,0 +1,4 @@
|
||||
~~ prior running runInstaller from 11gR2 distribution, execute the following command as oracle user
|
||||
$GRID_HOME/oui/bin/runInstaller -ignoreSysPrereqs -updateNodeList \
|
||||
ORACLE_HOME=/app/grid/product/19/ "CLUSTER_NODES=vortex-db01.swgalaxy,vortex-db02.swgalaxy" \
|
||||
CRS=true LOCAL_NODE=vortex-db01.swgalaxy
|
||||
8
tiddlywiki/KVM - rename VM example.txt
Executable file
8
tiddlywiki/KVM - rename VM example.txt
Executable file
@@ -0,0 +1,8 @@
|
||||
# rename atrisia3 to ivera-mongo03 changing also the storage path
|
||||
|
||||
virsh dumpxml atrisia3 > atrisia3.xml
|
||||
sed -i 's/atrisia3/ivera-mongo03/g' atrisia3.xml
|
||||
sed -i 's/\/vm\/hdd0\/ivera-mongo03/\/vm\/hdd0\/ivera-mongodb\/ivera-mongo03/g' atrisia3.xml
|
||||
mv /vm/hdd0/atrisia3 /vm/hdd0/ivera-mongodb/ivera-mongo03
|
||||
virsh undefine atrisia3 --remove-all-storage
|
||||
virsh define --file atrisia3.xml
|
||||
132
tiddlywiki/KVM - some example commands to create Oracle RAC.txt
Executable file
132
tiddlywiki/KVM - some example commands to create Oracle RAC.txt
Executable file
@@ -0,0 +1,132 @@
|
||||
qemu-img create -f raw /vm/hdd0/mandalore/hdd_01.img 8G
|
||||
qemu-img create -f raw /vm/hdd0/mandalore/swap_01.img 16G
|
||||
qemu-img create -f raw /vm/hdd0/mandalore/app_01.img 60G
|
||||
|
||||
|
||||
|
||||
virt-install \
|
||||
--graphics vnc,listen=0.0.0.0 \
|
||||
--name=mandalore \
|
||||
--vcpus=4 \
|
||||
--memory=32768 \
|
||||
--network bridge=br0 \
|
||||
--network bridge=br0 \
|
||||
--cdrom=/mnt/yavin4/kit/Oracle/OEL7/V1003434-01.iso \
|
||||
--disk /vm/hdd0/mandalore/hdd_01.img \
|
||||
--disk /vm/hdd0/mandalore/swap_01.img \
|
||||
--disk /vm/hdd0/mandalore/app_01.img \
|
||||
--os-variant=ol7.6
|
||||
|
||||
|
||||
|
||||
qemu-img create -f raw /vm/hdd0/mandalore/app_02.img 30G
|
||||
virsh attach-disk mandalore /vm/hdd0/mandalore/app_02.img vdd --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
|
||||
lvextend -l +100%FREE /dev/vg_app/lv_app
|
||||
xfs_growfs /app
|
||||
|
||||
|
||||
|
||||
dd if=/dev/zero of=/vm/hdd0/mandalore/data_01.img bs=1G count=20
|
||||
dd if=/dev/zero of=/vm/hdd0/mandalore/data_02.img bs=1G count=20
|
||||
dd if=/dev/zero of=/vm/hdd0/mandalore/fra_01.img bs=1G count=20
|
||||
|
||||
qemu-img create -f raw /vm/hdd0/mandalore/data_01.img 20G
|
||||
|
||||
virsh attach-disk mandalore --source /vm/hdd0/mandalore/data_01.img --target vde --persistent
|
||||
virsh attach-disk mandalore --source /vm/hdd0/mandalore/data_02.img --target vdf --persistent
|
||||
virsh attach-disk mandalore --source /vm/hdd0/mandalore/fra_01.img --target vdg --persistent
|
||||
|
||||
vgcreate vg_data /dev/vde /dev/vdf
|
||||
vgcreate vg_fra /dev/vdg
|
||||
|
||||
|
||||
lvcreate -n lv_data -l 100%FREE vg_data
|
||||
lvcreate -n lv_fra -l 100%FREE vg_fra
|
||||
|
||||
mkfs.xfs /dev/vg_data/lv_data
|
||||
mkfs.xfs /dev/vg_fra/lv_fra
|
||||
|
||||
|
||||
virsh detach-disk --domain mandalore /vm/hdd0/mandalore/data_01.img --persistent --config --live
|
||||
virsh attach-interface --domain vortex-db01 --type network \
|
||||
--source br0 \
|
||||
--model virtio \
|
||||
--config --live
|
||||
|
||||
virsh attach-interface --domain vortex-db01 --type bridge --source br0 --model virtio --config --live
|
||||
|
||||
dd if=/dev/zero of=/vm/ssd0/vortex-rac/disk_array/asm_01.img bs=1G count=20
|
||||
dd if=/dev/zero of=/vm/ssd0/vortex-rac/disk_array/asm_02.img bs=1G count=20
|
||||
dd if=/dev/zero of=/vm/ssd0/vortex-rac/disk_array/asm_03.img bs=1G count=20
|
||||
dd if=/dev/zero of=/vm/ssd0/vortex-rac/disk_array/asm_04.img bs=1G count=20
|
||||
dd if=/dev/zero of=/vm/ssd0/vortex-rac/disk_array/asm_05.img bs=1G count=20
|
||||
|
||||
|
||||
virsh domblklist vortex-db01 --details
|
||||
|
||||
|
||||
virsh attach-disk vortex-db01 --source /vm/ssd0/vortex-rac/disk_array/asm_01.img --target vde --persistent
|
||||
virsh attach-disk vortex-db01 --source /vm/ssd0/vortex-rac/disk_array/asm_02.img --target vdf --persistent
|
||||
virsh attach-disk vortex-db01 --source /vm/ssd0/vortex-rac/disk_array/asm_03.img --target vdg --persistent
|
||||
virsh attach-disk vortex-db01 --source /vm/ssd0/vortex-rac/disk_array/asm_04.img --target vdh --persistent
|
||||
virsh attach-disk vortex-db01 --source /vm/ssd0/vortex-rac/disk_array/asm_05.img --target vdi --persistent
|
||||
|
||||
virsh attach-disk vortex-db02 --source /vm/ssd0/vortex-rac/disk_array/asm_01.img --target vde --persistent
|
||||
virsh attach-disk vortex-db02 --source /vm/ssd0/vortex-rac/disk_array/asm_02.img --target vdf --persistent
|
||||
virsh attach-disk vortex-db02 --source /vm/ssd0/vortex-rac/disk_array/asm_03.img --target vdg --persistent
|
||||
virsh attach-disk vortex-db02 --source /vm/ssd0/vortex-rac/disk_array/asm_04.img --target vdh --persistent
|
||||
virsh attach-disk vortex-db02 --source /vm/ssd0/vortex-rac/disk_array/asm_05.img --target vdi --persistent
|
||||
|
||||
|
||||
# need PARTITIONS for ASM disk
|
||||
fdisk /dev/vdXXXXX
|
||||
|
||||
groupadd -g 54327 asmoper
|
||||
groupadd -g 54328 asmdba
|
||||
groupadd -g 54329 asmadmin
|
||||
|
||||
useradd -g oinstall -G asmoper,asmdba,asmadmin -c "Grid Infrastructure Owner" grid
|
||||
usermod -g oinstall -G asmdba,dba,oper -c "Oracle Software Owner" oracle
|
||||
|
||||
|
||||
systemctl stop firewalld.service
|
||||
systemctl disable firewalld.service
|
||||
|
||||
yum install -y kmod-oracleasm.x86_64 oracleasm-support
|
||||
oracleasm configure -i
|
||||
(choose grid for user and asmdba for group)
|
||||
oracleasm init
|
||||
|
||||
|
||||
oracleasm createdisk DATA_01 /dev/vde1
|
||||
oracleasm createdisk DATA_02 /dev/vdf1
|
||||
oracleasm createdisk DATA_03 /dev/vdg1
|
||||
oracleasm createdisk DATA_04 /dev/vdh1
|
||||
oracleasm createdisk DATA_05 /dev/vdi1
|
||||
|
||||
|
||||
|
||||
dd if=/dev/zero of=/vm/hdd0/vortex-rac/disk_array/asm_fra_01.img bs=1G count=20
|
||||
dd if=/dev/zero of=/vm/hdd0/vortex-rac/disk_array/asm_fra_02.img bs=1G count=20
|
||||
dd if=/dev/zero of=/vm/hdd0/vortex-rac/disk_array/asm_fra_03.img bs=1G count=20
|
||||
dd if=/dev/zero of=/vm/hdd0/vortex-rac/disk_array/asm_fra_04.img bs=1G count=20
|
||||
|
||||
|
||||
virsh attach-disk vortex-db01 --source /vm/hdd0/vortex-rac/disk_array/asm_fra_01.img --target vdj --persistent
|
||||
virsh attach-disk vortex-db01 --source /vm/hdd0/vortex-rac/disk_array/asm_fra_02.img --target vdk --persistent
|
||||
virsh attach-disk vortex-db01 --source /vm/hdd0/vortex-rac/disk_array/asm_fra_03.img --target vdl --persistent
|
||||
virsh attach-disk vortex-db01 --source /vm/hdd0/vortex-rac/disk_array/asm_fra_04.img --target vdm --persistent
|
||||
|
||||
|
||||
virsh attach-disk vortex-db02 --source /vm/hdd0/vortex-rac/disk_array/asm_fra_01.img --target vdj --persistent
|
||||
virsh attach-disk vortex-db02 --source /vm/hdd0/vortex-rac/disk_array/asm_fra_02.img --target vdk --persistent
|
||||
virsh attach-disk vortex-db02 --source /vm/hdd0/vortex-rac/disk_array/asm_fra_03.img --target vdl --persistent
|
||||
virsh attach-disk vortex-db02 --source /vm/hdd0/vortex-rac/disk_array/asm_fra_04.img --target vdm --persistent
|
||||
|
||||
|
||||
|
||||
oracleasm createdisk RECO_01 /dev/vdj1
|
||||
oracleasm createdisk RECO_02 /dev/vdk1
|
||||
oracleasm createdisk RECO_03 /dev/vdl1
|
||||
oracleasm createdisk RECO_04 /dev/vdm1
|
||||
91
tiddlywiki/KVM notes.txt
Executable file
91
tiddlywiki/KVM notes.txt
Executable file
@@ -0,0 +1,91 @@
|
||||
-- virsh useful commands
|
||||
-------------------------
|
||||
# create new domain
|
||||
virt-install \
|
||||
--graphics vnc,listen=0.0.0.0 \
|
||||
--name=mandalore \
|
||||
--vcpus=2 \
|
||||
--memory=4096 \
|
||||
--network bridge=br0 \
|
||||
--network bridge=br0 \
|
||||
--cdrom=/mnt/yavin4/kit/CentOS/CentOS-8.2.2004-x86_64-minimal.iso \
|
||||
--disk /datastore/mandalore/hdd_01.img,size=6 \
|
||||
--os-variant=centos8
|
||||
|
||||
# get OS Variant
|
||||
osinfo-query os
|
||||
|
||||
# destroy a domain
|
||||
virsh destroy mandalore
|
||||
|
||||
# delete VM and underlying storage
|
||||
virsh undefine mandalore --remove-all-storage
|
||||
|
||||
|
||||
# adding disk to VM
|
||||
# on Dom0 create the disk (RAW format in example)
|
||||
qemu-img create -f raw /datastore/mandalore/app_01.img 8G
|
||||
# change the owner of the image and permissions
|
||||
chown qemu:qemu /datastore/mandalore/app_01.img
|
||||
chmod 600 /datastore/mandalore/app_01.img
|
||||
# on DomU list block devices
|
||||
lsblk
|
||||
# or to have the sorted list of block devices
|
||||
fdisk -l | grep '^Disk /dev/vd[a-z]'
|
||||
# pick the next available device, ex: vdb
|
||||
# return to Dom0 and attach the disk
|
||||
virsh attach-disk mandalore /datastore/mandalore/app_01.img vdb --driver qemu --subdriver raw --targetbus virtio --persistent
|
||||
# to list the disk of a domain, execute from Dom0:
|
||||
virsh domblklist seedmachine --details
|
||||
|
||||
# to detach a disk
|
||||
virsh detach-disk mandalore vdb --persistent
|
||||
|
||||
|
||||
# to list the network interfaces of a domain, execute from Dom0:
|
||||
virsh domiflist mandalore
|
||||
# add network interface
|
||||
virsh attach-interface --domain vortex-db01 --type bridge --source br0 --model virtio --persistent
|
||||
# remove network interface
|
||||
virsh detach-interface --domain ylesia-db01 --mac 52:54:00:8f:40:3c --type bridge
|
||||
|
||||
|
||||
# dump domain XML config file
|
||||
virsh dumpxml mandalore
|
||||
|
||||
# define domain from XML config file
|
||||
virsh define --file /mnt/yavin4/tmp/seedmachine.xml
|
||||
|
||||
|
||||
# list all defined pool on Dom0
|
||||
virsh pool-list --all
|
||||
|
||||
# deleting a pool
|
||||
virsh pool-destroy atrisia1
|
||||
virsh pool-undefine atrisia1
|
||||
|
||||
# import (define) VM from XML file
|
||||
virsh define /mnt/yavin4/data/d.backup_vm/KVM_seed/Centos8_2020-10-25/seedmachine.xml
|
||||
|
||||
# clone VM
|
||||
virt-clone \
|
||||
--original mandalore \
|
||||
--name ossus \
|
||||
--file /datastore/ossus/hdd_01.img \
|
||||
--file /datastore/ossus/app_01.img
|
||||
|
||||
# KVM BUG: error: internal error: unknown feature amd-sev-es
|
||||
Workaround:
|
||||
mkdir -p /etc/qemu/firmware
|
||||
touch /etc/qemu/firmware/50-edk2-ovmf-cc.json
|
||||
|
||||
# Install KVM on CentOS8
|
||||
https://www.cyberciti.biz/faq/how-to-install-kvm-on-centos-8-headless-server/
|
||||
|
||||
# Online unicast MAC adress generator for network interface
|
||||
https://www.hellion.org.uk/cgi-bin/randmac.pl
|
||||
|
||||
# Static MAC Generator for KVM
|
||||
# from http://blog.zencoffee.org/2016/06/static-mac-generator-kvm/
|
||||
MAC=$(date +%s | md5sum | head -c 6 | sed -e 's/\([0-9A-Fa-f]\{2\}\)/\1:/g' -e 's/\(.*\):$/\1/' | sed -e 's/^/52:54:00:/')
|
||||
echo $MAC
|
||||
31
tiddlywiki/LVM - create PV_VG_LV and file system.txt
Executable file
31
tiddlywiki/LVM - create PV_VG_LV and file system.txt
Executable file
@@ -0,0 +1,31 @@
|
||||
# display device informations
|
||||
fdisk -l /dev/xvdf
|
||||
|
||||
# create PV and VG
|
||||
pvcreate /dev/xvdf
|
||||
vgcreate vg_fra /dev/xvdf
|
||||
|
||||
vgdisplay vg_fra -v
|
||||
|
||||
# create LV using 100% of free space in the VG
|
||||
lvcreate -n lv_fra -l 100%FREE vg_fra
|
||||
|
||||
# extend LV using 100% of free space in the VG
|
||||
lvextend -l +100%FREE /dev/vg-test/lv-test
|
||||
|
||||
# create XFS file system on LV
|
||||
mkfs.xfs /dev/vg_fra/lv_fra
|
||||
|
||||
# mount the file system
|
||||
mkdir -p /fra
|
||||
mount /dev/vg_fra/lv_fra /fra
|
||||
|
||||
df -hT /fra
|
||||
|
||||
# fstab entry
|
||||
/dev/mapper/vg_fra-lv_fra /fra xfs defaults 1 1
|
||||
|
||||
|
||||
umount /fra
|
||||
mount -a
|
||||
df -hT
|
||||
8
tiddlywiki/LVM - extend VG_LV and file system.txt
Executable file
8
tiddlywiki/LVM - extend VG_LV and file system.txt
Executable file
@@ -0,0 +1,8 @@
|
||||
-- create a new PV with the new device
|
||||
pvcreate /dev/xvdg
|
||||
-- extend the VG
|
||||
vgextend vg_app /dev/xvdg
|
||||
-- extend the LV
|
||||
lvextend -l +100%FREE /dev/vg_app/lv_app
|
||||
-- extend XFS file system
|
||||
xfs_growfs /app
|
||||
25
tiddlywiki/LVM example.txt
Executable file
25
tiddlywiki/LVM example.txt
Executable file
@@ -0,0 +1,25 @@
|
||||
lvdisplay
|
||||
vgdisplay
|
||||
pvdisplay
|
||||
|
||||
|
||||
pvcreate /dev/xvdd1
|
||||
pvcreate /dev/xvde1
|
||||
|
||||
|
||||
vgextend vg_pgdata /dev/xvdd1 /dev/xvde1
|
||||
|
||||
lvextend -l +100%FREE /dev/vg_pgdata/lv_pgdata
|
||||
|
||||
|
||||
-- For EXT4 partitions:
|
||||
resize2fs /dev/vg_pgdata/lv_pgdata
|
||||
|
||||
|
||||
-- For XFS:
|
||||
xfs_growfs -d /dev/vg_pgdata/lv_pgdata
|
||||
|
||||
-- to avoid WARNING: Not using lvmetad because duplicate PVs were found
|
||||
-- add in /etc/lvm/lvm.conf
|
||||
global_filter = [ "a|/dev/xvd*|", "r|/dev/sd*|" ]
|
||||
|
||||
67
tiddlywiki/LVM snapshots.txt
Executable file
67
tiddlywiki/LVM snapshots.txt
Executable file
@@ -0,0 +1,67 @@
|
||||
-- setup
|
||||
pvcreate /dev/xvdc1
|
||||
pvcreate /dev/xvdd1
|
||||
|
||||
pvs
|
||||
PV VG Fmt Attr PSize PFree
|
||||
/dev/xvdc1 lvm2 --- <100.00g <100.00g
|
||||
/dev/xvdd1 lvm2 --- <100.00g <100.00g
|
||||
|
||||
vgcreate vg_data /dev/xvdc1 /dev/xvdd1
|
||||
|
||||
vgs
|
||||
VG #PV #LV #SN Attr VSize VFree
|
||||
vg_data 2 0 0 wz--n- 199.99g 199.99g
|
||||
|
||||
|
||||
lvcreate -n lv_data -L 99G vg_data
|
||||
|
||||
mkfs.xfs /dev/vg_data/lv_data
|
||||
|
||||
mkdir /mnt/{original,snap}
|
||||
|
||||
mount /dev/vg_data/lv_data /mnt/original
|
||||
echo "/dev/vg_data/lv_data /mnt/original xfs defaults 0 0" >> /etc/fstab
|
||||
|
||||
-- snapshot creation
|
||||
lvcreate -L 99G -s /dev/vg_data/lv_data -n lv_snapshot
|
||||
|
||||
-- mount the snapshot LV (on XFS you should use -o nouuid option)
|
||||
mount -o nouuid /dev/vg_data/lv_snapshot /mnt/snap/
|
||||
|
||||
-- emptying file on snapshot FS
|
||||
> /mnt/snap/file_90G.raw
|
||||
|
||||
df -h /mnt/snap
|
||||
Filesystem Size Used Avail Use% Mounted on
|
||||
/dev/mapper/vg_data-lv_snapshot 99G 33M 99G 1% /mnt/snap
|
||||
|
||||
-- changes on the snapshot FS do not affect the Data% usage of the snapshot LV
|
||||
lvs
|
||||
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
|
||||
lv_data vg_data owi-aos--- 99.00g
|
||||
lv_snapshot vg_data swi-aos--- 99.00g lv_data 0.00
|
||||
|
||||
-- change 10Gb of data on source LV
|
||||
dd if=/dev/zero of=/mnt/original/file_90G.raw bs=1G count=10
|
||||
|
||||
lvdisplay /dev/vg_data/lv_snapshot | grep "Allocated to snapshot"
|
||||
Allocated to snapshot 10.14%
|
||||
|
||||
-- revert to a snapshot
|
||||
umount /mnt/{original,snap}
|
||||
|
||||
lvconvert --merge /dev/vg_data/lv_snapshot
|
||||
|
||||
-- if the COW space is exhausted, LV snapshot status become INACTIVE
|
||||
-- we cannot revert from an INACTIVE snapshot
|
||||
|
||||
lvdisplay /dev/vg_data/lv_snapshot | grep "LV snapshot status"
|
||||
LV snapshot status INACTIVE destination for lv_data
|
||||
|
||||
lvconvert --merge /dev/vg_data/lv_snapshot
|
||||
Unable to merge invalidated snapshot LV "lv_snapshot".
|
||||
|
||||
-- remove a snapshot
|
||||
lvremove /dev/vg_data/lv_snapshot
|
||||
|
||||
186
tiddlywiki/Les verbes irreguliers.tid
Executable file
186
tiddlywiki/Les verbes irreguliers.tid
Executable file
@@ -0,0 +1,186 @@
|
||||
created: 20191023142957496
|
||||
creator: vplesnila
|
||||
modified: 20191107143218857
|
||||
modifier: vplesnila
|
||||
tags: English
|
||||
title: Les verbes irréguliers
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
|!Anglais ( Infinitif )|!Prétérit|!Participe passé|!Français ( Infinitif )|
|
||||
|abide|abode|abode|souffrir, supporter|
|
||||
|arise|arose|arisen|survenir|
|
||||
|awake|awoke|awoken|se réveiller|
|
||||
|be|was, were|been|être|
|
||||
|bear|bore|borne / born|porter / supporter|
|
||||
|beat|beat|beaten|battre|
|
||||
|become|became|become|devenir|
|
||||
|beget|begat / begot|begotten|engendrer|
|
||||
|begin|began|begun|commencer|
|
||||
|bend|bent|bent|plier / se courber|
|
||||
|bereave|bereft / bereaved|bereft / bereaved|déposséder / priver|
|
||||
|bet|bet|bet|parier|
|
||||
|bid|bid / bade|bid / bidden|offrir|
|
||||
|bite|bit|bitten|mordre|
|
||||
|bleed|bled|bled|saigner|
|
||||
|blow|blew|blown|souffler / gonfler|
|
||||
|break|broke|broken|casser|
|
||||
|breed|bred|bred|élever (des animaux)|
|
||||
|bring|brought|brought|apporter|
|
||||
|broadcast|broadcast|broadcast|diffuser / émettre|
|
||||
|build|built|built|construire|
|
||||
|burn|burnt / burned|burnt / burned|brûler|
|
||||
|burst|burst|burst|éclater|
|
||||
|buy|bought|bought|acheter|
|
||||
|can|could|could|pouvoir|
|
||||
|cast|cast|cast|jeter / distribuer (rôles)|
|
||||
|catch|caught|caught|attraper|
|
||||
|chide|chid|chidden|gronder|
|
||||
|choose|chose|chosen|choisir|
|
||||
|cling|clung|clung|s’accrocher|
|
||||
|clothe|clad / clothed|clad / clothed|habiller / recouvrir|
|
||||
|come|came|come|venir|
|
||||
|cost|cost|cost|coûter|
|
||||
|creep|crept|crept|ramper|
|
||||
|cut|cut|cut|couper|
|
||||
|deal|dealt|dealt|distribuer|
|
||||
|dig|dug|dug|creuser|
|
||||
|dive|dived|dived / dove|plonger|
|
||||
|do|did|done|faire|
|
||||
|draw|drew|drawn|dessiner / tirer|
|
||||
|dream|dreamt / dreamed|dreamt / dreamed|rêver|
|
||||
|drink|drank|drunk|boire|
|
||||
|drive |drove|driven|conduire|
|
||||
|dwell|dwelt|dwelt / dwelled|habiter|
|
||||
|eat|ate|eaten|manger|
|
||||
|fall|fell|fallen|tomber|
|
||||
|feed|fed|fed|nourrir|
|
||||
|feel|felt|felt|se sentir / ressentir|
|
||||
|fight|fought|fought|se battre|
|
||||
|find|found|found|trouver|
|
||||
|flee|fled|fled|s’enfuir|
|
||||
|fling|flung|flung|lancer|
|
||||
|fly|flew|flown|voler|
|
||||
|forbid|forbade|forbidden|interdire|
|
||||
|forecast|forecast|forecast|prévoir|
|
||||
|forget|forgot|forgotten / forgot|oublier|
|
||||
|forgive|forgave|forgiven|pardonner|
|
||||
|forsake|forsook|forsaken|abandonner|
|
||||
|foresee|foresaw|foreseen|prévoir / présentir|
|
||||
|freeze|froze|frozen|geler|
|
||||
|get|got|gotten / got|obtenir|
|
||||
|give|gave|given|donner|
|
||||
|go|went|gone|aller|
|
||||
|grind|ground|ground|moudre / opprimer|
|
||||
|grow|grew|grown|grandir / pousser|
|
||||
|hang|hung|hung|tenir / pendre|
|
||||
|have|had|had|avoir|
|
||||
|hear|heard|heard|entendre|
|
||||
|hide|hid|hidden|cacher|
|
||||
|hit|hit|hit|taper / appuyer|
|
||||
|hold|held|held|tenir|
|
||||
|hurt|hurt|hurt|blesser|
|
||||
|keep|kept|kept|garder|
|
||||
|kneel|knelt / knelled|knelt / kneeled|s’agenouiller|
|
||||
|know|knew|known|connaître / savoir|
|
||||
|lay|laid|laid|poser|
|
||||
|lead|led|led|mener / guider|
|
||||
|lean|leant / leaned|leant / leaned|s’incliner / se pencher|
|
||||
|leap|leapt / leaped|leapt / leaped|sauter / bondir|
|
||||
|learn|learnt|learnt|apprendre|
|
||||
|leave|left|left|laisser / quitter / partir|
|
||||
|lend|lent|lent|prêter|
|
||||
|let|let|let|permettre / louer / laisser|
|
||||
|lie|lay|lain|s’allonger|
|
||||
|light|lit / lighted|lit / lighted|allumer|
|
||||
|lose|lost|lost|perdre|
|
||||
|make|made|made|fabriquer|
|
||||
|mean|meant|meant|signifier|
|
||||
|meet|met|met|rencontrer|
|
||||
|mow|mowed|mowed / mown|tondre|
|
||||
|offset|offset|offset|compenser|
|
||||
|overcome|overcame|overcome|surmonter|
|
||||
|partake|partook|partaken|prendre part à|
|
||||
|pay|paid|paid|payer|
|
||||
|plead|pled / pleaded|pled / pleaded|supplier / plaider|
|
||||
|preset|preset|preset|programmer|
|
||||
|prove|proved|proven / proved|prouver|
|
||||
|put|put|put|mettre|
|
||||
|quit|quit|quit|quitter|
|
||||
|read|read|read|lire|
|
||||
|relay|relaid|relaid|relayer|
|
||||
|rend|rent|rent|déchirer|
|
||||
|rid|rid|rid|débarrasser|
|
||||
|ride|rode|ridden|monter (vélo, cheval)|
|
||||
|ring|rang|rung|sonner / téléphoner|
|
||||
|rise|rose|risen|lever|
|
||||
|run|ran|run|courir|
|
||||
|saw|sawed|sawn / sawed|scier|
|
||||
|say|said|said|dire|
|
||||
|see|saw|seen|voir|
|
||||
|seek|sought|sought|chercher|
|
||||
|sell|sold|sold|vendre|
|
||||
|send|sent|sent|envoyer|
|
||||
|set|set|set|fixer|
|
||||
|shake|shook|shaken|secouer|
|
||||
|shed|shed|shed|répandre / laisser tomber|
|
||||
|shine|shone|shone|briller|
|
||||
|shoe|shod|shod|chausser|
|
||||
|shoot|shot|shot|tirer / fusiller|
|
||||
|show|showed|shown|montrer|
|
||||
|shut|shut|shut|fermer|
|
||||
|sing|sang|sung|chanter|
|
||||
|sink|sank / sunk|sunk / sunken|couler|
|
||||
|sit|sat|sat|s’asseoir|
|
||||
|slay|slew|slain|tuer|
|
||||
|sleep|slept|slept|dormir|
|
||||
|slide|slid|slid|glisser|
|
||||
|slink|slunk / slinked|slunk / slinked|s’en aller furtivement|
|
||||
|slit|slit|slit|fendre|
|
||||
|smell|smelt|smelt|sentir|
|
||||
|sow|sowed|sown / sowed|semer|
|
||||
|speak|spoke|spoken|parler|
|
||||
|speed|sped|sped|aller vite|
|
||||
|spell|spelt|spelt|épeler / orthographier|
|
||||
|spend|spent|spent|dépenser / passer du temps|
|
||||
|spill|spilt / spilled|spilt / spilled|renverser|
|
||||
|spin|spun|spun|tourner / faire tourner|
|
||||
|spit|spat / spit|spat / spit|cracher|
|
||||
|split|split|split|fendre|
|
||||
|spoil|spoilt|spoilt|gâcher / gâter|
|
||||
|spread|spread|spread|répandre|
|
||||
|spring|sprang|sprung|surgir / jaillir / bondir|
|
||||
|stand|stood|stood|être debout|
|
||||
|steal|stole|stolen|voler / dérober|
|
||||
|stick|stuck|stuck|coller|
|
||||
|sting|stung|stung|piquer|
|
||||
|stink|stank|stunk|puer|
|
||||
|strew|strewed|strewn / strewed|éparpiller|
|
||||
|strike|struck|stricken / struck|frapper|
|
||||
|strive|strove|striven|s’efforcer|
|
||||
|swear|swore|sworn|jurer|
|
||||
|sweat|sweat / sweated|sweat / sweated|suer|
|
||||
|sweep|swept|swept|balayer|
|
||||
|swell|swelled|swollen / swelled|gonfler / enfler|
|
||||
|swim|swam|swum|nager|
|
||||
|swing|swung|swung|se balancer|
|
||||
|take|took|taken|prendre|
|
||||
|teach|taught|taught|enseigner|
|
||||
|tear|tore|torn|déchirer|
|
||||
|tell|told|told|dire / raconter|
|
||||
|think|thought|thought|penser|
|
||||
|thrive|throve / thrived|thriven / thrived|prospérer|
|
||||
|throw|threw|thrown|jeter|
|
||||
|thrust|thrust|thrust|enfoncer|
|
||||
|tread|trod|trodden|piétiner quelque chose|
|
||||
|typeset|typeset|typeset|composer|
|
||||
|undergo|underwent|undergone|subir|
|
||||
|understand|understood|understood|comprendre|
|
||||
|wake|woke|woken|réveiller|
|
||||
|wear|wore|worn|porter (avoir sur soi)|
|
||||
|weep|wept|wept|pleurer|
|
||||
|wet|wet / wetted|wet / wetted|mouiller|
|
||||
|win|won|won|gagner|
|
||||
|wind|wound|wound|enrouler / remonter|
|
||||
|withdraw|withdrew|withdrawn|se retirer|
|
||||
|wring|wrung|wrung|tordre|
|
||||
|write|wrote|written|écrire|
|
||||
1
tiddlywiki/Linux - listening ports.txt
Executable file
1
tiddlywiki/Linux - listening ports.txt
Executable file
@@ -0,0 +1 @@
|
||||
alias listen='lsof -i -P | grep -i "listen"'
|
||||
6
tiddlywiki/Linux - remove a systemd service.txt
Executable file
6
tiddlywiki/Linux - remove a systemd service.txt
Executable file
@@ -0,0 +1,6 @@
|
||||
systemctl stop <your_service>.service
|
||||
systemctl disable <your_service>.service
|
||||
rm -rf /etc/systemd/system/<your_service>.service
|
||||
rm -rf /usr/lib/systemd/system/<your_service>.service
|
||||
systemctl daemon-reload
|
||||
systemctl reset-failed
|
||||
9
tiddlywiki/Linux 1.tid
Executable file
9
tiddlywiki/Linux 1.tid
Executable file
@@ -0,0 +1,9 @@
|
||||
color: #424200
|
||||
created: 20190622232815693
|
||||
creator: vplesnila
|
||||
modified: 20190622233250612
|
||||
modifier: vplesnila
|
||||
tags: Contents
|
||||
title: Linux
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
3
tiddlywiki/Linux Mint.md
Executable file
3
tiddlywiki/Linux Mint.md
Executable file
@@ -0,0 +1,3 @@
|
||||
[Install Touchegg](https://ubuntuhandbook.org/index.php/2021/06/multi-touch-gestures-ubuntu-20-04/)
|
||||
[How to add a shell script to launcher as shortcut](https://askubuntu.com/questions/141229/how-to-add-a-shell-script-to-launcher-as-shortcut)
|
||||
|
||||
4
tiddlywiki/Linux perf tips.txt
Executable file
4
tiddlywiki/Linux perf tips.txt
Executable file
@@ -0,0 +1,4 @@
|
||||
iostat -x 2 5
|
||||
|
||||
# show environment variables of a linux process
|
||||
cat /proc/<PID>/environ | tr '\0' '\n'
|
||||
5
tiddlywiki/Linux prompt examples.txt
Executable file
5
tiddlywiki/Linux prompt examples.txt
Executable file
@@ -0,0 +1,5 @@
|
||||
export PS1='`whoami`@`hostname | cut -d "." -f1`:${PWD}> '
|
||||
# PROMPT: pgadmin@mobus:/home/pgadmin>
|
||||
|
||||
export PS1='$USER@`hostname`[$ORACLE_SID]:$PWD\$ '
|
||||
# PROMPT: oracle@ambria.swgalaxy[C3PXPRD]:/app$
|
||||
22
tiddlywiki/Markdown example 01.md
Executable file
22
tiddlywiki/Markdown example 01.md
Executable file
@@ -0,0 +1,22 @@
|
||||
Inline code: file `myfile.txt`is the good example :)
|
||||
|
||||
Code example:
|
||||
```sql
|
||||
SET SERVEROUTPUT ON
|
||||
SET FEEDBACK OFF
|
||||
declare
|
||||
|
||||
CURSOR c_user_tablespaces is
|
||||
select tablespace_name
|
||||
from dba_tablespaces
|
||||
where contents not in ('UNDO','TEMPORARY') and tablespace_name not in ('SYSTEM','SYSAUX');
|
||||
|
||||
BEGIN
|
||||
for r_user_tablespaces in c_user_tablespaces
|
||||
loop
|
||||
s1 := s1 || r_user_tablespaces.tablespace_name || ',';
|
||||
s2 := s2 || r_user_tablespaces.tablespace_name || ''','|| chr(13)||'''';
|
||||
end loop;
|
||||
END;
|
||||
/
|
||||
```
|
||||
110
tiddlywiki/Markdown test 2.md
Executable file
110
tiddlywiki/Markdown test 2.md
Executable file
@@ -0,0 +1,110 @@
|
||||
## Mon titre 2
|
||||
#### Mon Sous-titre 2
|
||||
|
||||
Bla, blah!Bla, blah!Bla, blah!Bla,\blah!Bla, blah!Bla, blah!Bla, blah!
|
||||
|
||||
#### Mon Sous-titre 2bis
|
||||
|
||||
|
||||
```ruby
|
||||
def index
|
||||
puts "hello world"
|
||||
end
|
||||
```
|
||||
|
||||
Bla bla bla,
|
||||
bla bla bla
|
||||
```sql
|
||||
select * from dual;
|
||||
```
|
||||
|
||||
# Markdown syntax guide
|
||||
|
||||
## Headers
|
||||
|
||||
# This is a Heading h1
|
||||
## This is a Heading h2
|
||||
###### This is a Heading h6
|
||||
|
||||
## Emphasis
|
||||
|
||||
*This text will be italic*
|
||||
_This will also be italic_
|
||||
|
||||
**This text will be bold**
|
||||
__This will also be bold__
|
||||
|
||||
_You **can** combine them_
|
||||
|
||||
## Lists
|
||||
|
||||
### Unordered
|
||||
|
||||
* Item 1
|
||||
* Item 2
|
||||
* Item 2a
|
||||
* Item 2b
|
||||
|
||||
### Ordered
|
||||
|
||||
1. Item 1
|
||||
1. Item 2
|
||||
1. Item 3
|
||||
1. Item 3a
|
||||
1. Item 3b
|
||||
|
||||
## Images
|
||||
|
||||

|
||||
|
||||
## Links
|
||||
|
||||
You may be using [Markdown Live Preview](https://markdownlivepreview.com/).
|
||||
|
||||
## Blockquotes
|
||||
|
||||
> Markdown is a lightweight markup language with plain-text-formatting syntax, created in 2004 by John Gruber with Aaron Swartz.
|
||||
>
|
||||
>> Markdown is often used to format readme files, for writing messages in online discussion forums, and to create rich text using a plain text editor.
|
||||
|
||||
## Tables
|
||||
|
||||
| Left columns | Right columns |
|
||||
| ------------- |:-------------:|
|
||||
| left foo | right foo |
|
||||
| left bar | right bar |
|
||||
| left baz | right baz |
|
||||
|
||||
|
||||
| Tables | Are | Cool |
|
||||
|----------|:-------------:|------:|
|
||||
| col 1 is | left-aligned | $1600 |
|
||||
| col 2 is | centered | $12 |
|
||||
| col 3 is | right-aligned | $1 |
|
||||
|
||||
| Script | Description | Example |
|
||||
|----------|-------------|---------|
|
||||
|... |... |... |
|
||||
|... |... |... |
|
||||
|... |... |... |
|
||||
|... |... |... |
|
||||
|
||||
## Blocks of code
|
||||
|
||||
```
|
||||
let message = 'Hello world';
|
||||
alert(message);
|
||||
```
|
||||
|
||||
## Inline code
|
||||
|
||||
This web site is using `markedjs/marked`.
|
||||
|
||||
|
||||
|
||||
- Antebellum - film 2020 - AlloCiné
|
||||
- Mission trésor - film 2017 - AlloCiné (Noah)
|
||||
- Color Out of Space - film 2019 - AlloCiné
|
||||
- Miss Fisher & the Crypt of Tears (2020) - IMDb
|
||||
- Ghost Killers vs. Bloody Mary - film 2017 - AlloCiné
|
||||
|
||||
67
tiddlywiki/Markdown test.md
Executable file
67
tiddlywiki/Markdown test.md
Executable file
@@ -0,0 +1,67 @@
|
||||
# Markdown syntax guide
|
||||
|
||||
## Headers
|
||||
|
||||
# This is a Heading h1
|
||||
## This is a Heading h2
|
||||
###### This is a Heading h6
|
||||
|
||||
## Emphasis
|
||||
|
||||
*This text will be italic*
|
||||
_This will also be italic_
|
||||
|
||||
**This text will be bold**
|
||||
__This will also be bold__
|
||||
|
||||
_You **can** combine them_
|
||||
|
||||
## Lists
|
||||
|
||||
### Unordered
|
||||
|
||||
* Item 1
|
||||
* Item 2
|
||||
* Item 2a
|
||||
* Item 2b
|
||||
|
||||
### Ordered
|
||||
|
||||
1. Item 1
|
||||
1. Item 2
|
||||
1. Item 3
|
||||
1. Item 3a
|
||||
1. Item 3b
|
||||
|
||||
## Images
|
||||
|
||||

|
||||
|
||||
## Links
|
||||
|
||||
You may be using [Markdown Live Preview](https://markdownlivepreview.com/).
|
||||
|
||||
## Blockquotes
|
||||
|
||||
> Markdown is a lightweight markup language with plain-text-formatting syntax, created in 2004 by John Gruber with Aaron Swartz.
|
||||
>
|
||||
>> Markdown is often used to format readme files, for writing messages in online discussion forums, and to create rich text using a plain text editor.
|
||||
|
||||
## Tables
|
||||
|
||||
| Left columns | Right columns |
|
||||
| ------------- |:-------------:|
|
||||
| left foo | right foo |
|
||||
| left bar | right bar |
|
||||
| left baz | right baz |
|
||||
|
||||
## Blocks of code
|
||||
|
||||
```
|
||||
let message = 'Hello world';
|
||||
alert(message);
|
||||
```
|
||||
|
||||
## Inline code
|
||||
|
||||
This web site is using `markedjs/marked`.
|
||||
287
tiddlywiki/Migration + upgrade cross-platform using incremental backups.txt
Executable file
287
tiddlywiki/Migration + upgrade cross-platform using incremental backups.txt
Executable file
@@ -0,0 +1,287 @@
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~ Walkthrough for:
|
||||
~~ V4 Reduce Transportable Tablespace Downtime using Cross Platform Incremental Backup (Doc ID 2471245.1)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This procedure applies to an 11.2.0.4 or higher source database.
|
||||
The target database can be a higher version than the source database (upgrade).
|
||||
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~ Source database setup
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
initGREEDOPRD.ora:
|
||||
|
||||
db_name=GREEDO
|
||||
instance_name=GREEDOPRD
|
||||
db_unique_name=GREEDOPRD
|
||||
compatible=11.2.0.0
|
||||
control_files=(/data/GREEDOPRD/control01.ctl)
|
||||
db_create_file_dest=/data
|
||||
db_create_online_log_dest_1=/data
|
||||
db_recovery_file_dest_size=4G
|
||||
db_recovery_file_dest=/fra
|
||||
log_archive_dest_1='location=USE_DB_RECOVERY_FILE_DEST'
|
||||
log_archive_format=%t_%s_%r.arc
|
||||
db_block_size=8192
|
||||
open_cursors=300
|
||||
diagnostic_dest=/app/oracle/base/admin/GREEDOPRD
|
||||
sga_max_size=3G
|
||||
sga_target=3G
|
||||
pga_aggregate_target=512M
|
||||
processes=350
|
||||
audit_file_dest=/app/oracle/base/admin/GREEDOPRD/adump
|
||||
audit_trail=db
|
||||
remote_login_passwordfile=exclusive
|
||||
undo_tablespace=UNDOTBS
|
||||
|
||||
|
||||
-- tablespace setup
|
||||
create tablespace TS1 datafile size 16M autoextend ON next 16M;
|
||||
create tablespace TS2 datafile size 16M autoextend ON next 16M;
|
||||
create tablespace TS3 datafile size 16M autoextend ON next 16M;
|
||||
|
||||
alter tablespace TS1 add datafile size 16M autoextend ON next 16M;
|
||||
alter tablespace TS1 add datafile size 16M autoextend ON next 16M;
|
||||
alter tablespace TS2 add datafile size 16M autoextend ON next 16M;
|
||||
alter tablespace TS2 add datafile size 16M autoextend ON next 16M;
|
||||
alter tablespace TS2 add datafile size 16M autoextend ON next 16M;
|
||||
|
||||
-- schema setup
|
||||
grant connect, resource, unlimited tablespace to user1 identified by user1;
|
||||
grant connect, resource, unlimited tablespace to user2 identified by user2;
|
||||
|
||||
grant create view to user1;
|
||||
grant create view to user2;
|
||||
|
||||
create profile STANDARD_USER limit
|
||||
SESSIONS_PER_USER 10
|
||||
CONNECT_TIME 30;
|
||||
|
||||
create profile VIP_USER limit
|
||||
SESSIONS_PER_USER 20
|
||||
CONNECT_TIME 60;
|
||||
|
||||
alter user user1 profile STANDARD_USER;
|
||||
alter user user2 profile VIP_USER;
|
||||
|
||||
-- schema contents setup
|
||||
create table user1.tab1 as select * from dba_extents;
|
||||
alter table user1.tab1 move tablespace TS1;
|
||||
insert into user1.tab1 select * from user1.tab1;
|
||||
insert into user1.tab1 select * from user1.tab1;
|
||||
insert into user1.tab1 select * from user1.tab1;
|
||||
commit;
|
||||
insert into user1.tab1 select * from user1.tab1;
|
||||
insert into user1.tab1 select * from user1.tab1;
|
||||
commit;
|
||||
|
||||
create table user2.tab2 as select * from user1.tab1;
|
||||
insert into user2.tab2 select * from user2.tab2;
|
||||
commit;
|
||||
insert into user2.tab2 select * from user2.tab2;
|
||||
commit;
|
||||
|
||||
alter table user1.tab1 move tablespace TS2;
|
||||
|
||||
create index user1.ind1 on user1.tab1(blocks) tablespace TS3;
|
||||
create index user2.ind2 on user2.tab2(blocks) tablespace TS3;
|
||||
|
||||
alter table user2.tab2 move tablespace TS2;
|
||||
alter index user2.ind2 rebuild tablespace TS3;
|
||||
|
||||
|
||||
|
||||
create table user1.message(m varchar2(30), d date) tablespace TS3;
|
||||
insert into user1.message values('Setup',sysdate);
|
||||
commit;
|
||||
|
||||
|
||||
grant select on v_$session to user1;
|
||||
grant select on v_$tablespace to user2;
|
||||
|
||||
connect user1/user1
|
||||
create view sess as select * from v$session;
|
||||
|
||||
|
||||
connect user2/user2
|
||||
|
||||
create or replace procedure TSLIST
|
||||
is
|
||||
cursor c_ts is select * from v$tablespace;
|
||||
begin
|
||||
for r_ts in c_ts
|
||||
loop
|
||||
dbms_output.put_line( 'Tablespace: ' ||r_ts.name );
|
||||
end loop;
|
||||
end;
|
||||
/
|
||||
|
||||
|
||||
-- check if the tablespaces are self_contained
|
||||
SQL> exec sys.dbms_tts.transport_set_check(ts_list => 'TS1,TS2,TS3', incl_constraints => true);
|
||||
SQL> Select * from transport_set_violations;
|
||||
|
||||
PL/SQL procedure successfully completed.
|
||||
|
||||
-- backup source database
|
||||
run
|
||||
{
|
||||
set nocfau;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/GREEDO/%d_%U_%s_%t.bck';
|
||||
allocate channel ch02 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/GREEDO/%d_%U_%s_%t.bck';
|
||||
allocate channel ch03 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/GREEDO/%d_%U_%s_%t.bck';
|
||||
allocate channel ch04 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/GREEDO/%d_%U_%s_%t.bck';
|
||||
backup as compressed backupset incremental level 0 database include current controlfile plus archivelog delete input;
|
||||
}
|
||||
|
||||
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~ Target database setup
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
initWEDGEPRD.ora:
|
||||
|
||||
db_name=WEDGE
|
||||
instance_name=WEDGEPRD
|
||||
db_unique_name=WEDGEPRD
|
||||
compatible=19.0.0.0.0
|
||||
control_files=(/data/WEDGEPRD/control01.ctl)
|
||||
db_create_file_dest=/data
|
||||
db_create_online_log_dest_1=/data
|
||||
db_recovery_file_dest_size=4G
|
||||
db_recovery_file_dest=/fra
|
||||
log_archive_dest_1='location=USE_DB_RECOVERY_FILE_DEST'
|
||||
log_archive_format=%t_%s_%r.arc
|
||||
db_block_size=8192
|
||||
open_cursors=300
|
||||
diagnostic_dest=/app/oracle/base/admin/WEDGEPRD
|
||||
sga_max_size=3G
|
||||
sga_target=3G
|
||||
pga_aggregate_target=512M
|
||||
pga_aggregate_limit=2G
|
||||
processes=350
|
||||
audit_file_dest=/app/oracle/base/admin/WEDGEPRD/adump
|
||||
audit_trail=db
|
||||
remote_login_passwordfile=exclusive
|
||||
undo_tablespace=TBS_UNDO
|
||||
|
||||
|
||||
-- backup target database
|
||||
run
|
||||
{
|
||||
set nocfau;
|
||||
allocate channel ch01 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/WEDGE/%d_%U_%s_%t.bck';
|
||||
allocate channel ch02 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/WEDGE/%d_%U_%s_%t.bck';
|
||||
allocate channel ch03 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/WEDGE/%d_%U_%s_%t.bck';
|
||||
allocate channel ch04 device type disk format '/mnt/yavin4/tmp/_oracle_/orabackup/WEDGE/%d_%U_%s_%t.bck';
|
||||
backup as compressed backupset incremental level 0 database include current controlfile plus archivelog delete input;
|
||||
}
|
||||
|
||||
|
||||
~~
|
||||
|
||||
-- download scripts (attached to note, currently: rman_xttconvert_VER4.3.zip) to source machine
|
||||
-- unzip to a temporary location
|
||||
-- edit xtt.properties file at least with the mandatory fields:
|
||||
|
||||
tablespaces
|
||||
platformid
|
||||
src_scratch_location
|
||||
dest_scratch_location
|
||||
dest_datafile_location
|
||||
(if using 12c) -- usermantransport=1
|
||||
|
||||
|
||||
-- get PLATFORM_ID for SOURCE and DESTINATION databases
|
||||
SQL> select PLATFORM_ID from V$DATABASE;
|
||||
|
||||
-- once xtt.properties OK on source, copy to dest in $TMPDIR
|
||||
|
||||
-- set TMPDIR environment variable for BOTH machines:
|
||||
export TMPDIR=/mnt/yavin4/tmp/_oracle_/tmp/TEMP_SOURCE_XTTCONVERT
|
||||
export TMPDIR=/mnt/yavin4/tmp/_oracle_/tmp/TEMP_DEST_XTTCONVERT
|
||||
|
||||
|
||||
-- Run the backup on the source system
|
||||
cd $TMPDIR
|
||||
$ORACLE_HOME/perl/bin/perl xttdriver.pl --backup
|
||||
|
||||
|
||||
-- Restore the datafiles on the destination system
|
||||
cd $TMPDIR
|
||||
$ORACLE_HOME/perl/bin/perl xttdriver.pl --restore
|
||||
|
||||
|
||||
-- Roll Forward Phase
|
||||
-- as long as necessary perform backup/restore (incremental!) using previous commands
|
||||
|
||||
-- in order to trace, we add a new datafile and some data
|
||||
|
||||
insert into user1.message values('Roll Forward Phase',sysdate);
|
||||
commit;
|
||||
|
||||
alter tablespace TS2 add datafile size 8M autoextend ON next 8M;
|
||||
|
||||
|
||||
|
||||
|
||||
-- Phase final Incremental Backup
|
||||
-- If you are running 12c, this step can be replaced by Phase 4 in Note 2005729.1
|
||||
|
||||
insert into user1.message values('Just before RO tablespaces',sysdate);
|
||||
|
||||
alter tablespace TS1 read only;
|
||||
alter tablespace TS2 read only;
|
||||
alter tablespace TS3 read only;
|
||||
|
||||
-- take final incremental backup ignoring errors like:
|
||||
ORA-20001: TABLESPACE(S) IS READONLY OR,
|
||||
OFFLINE JUST CONVERT, COPY
|
||||
ORA-06512: at line 284
|
||||
|
||||
cd $TMPDIR
|
||||
$ORACLE_HOME/perl/bin/perl xttdriver.pl --backup
|
||||
|
||||
-- restore final incremental backup on target database
|
||||
cd $TMPDIR
|
||||
$ORACLE_HOME/perl/bin/perl xttdriver.pl --restore
|
||||
|
||||
|
||||
-- on source
|
||||
------------
|
||||
mkdir -p /mnt/yavin4/tmp/_oracle_/tmp/DATAPUMP_SOURCE_XTTCONVERT
|
||||
SQL> create directory DPUMP_TTS as '/mnt/yavin4/tmp/_oracle_/tmp/DATAPUMP_SOURCE_XTTCONVERT';
|
||||
|
||||
cd /mnt/yavin4/tmp/_oracle_/tmp/DATAPUMP_SOURCE_XTTCONVERT
|
||||
|
||||
-- export metadata
|
||||
expdp userid=system/secret directory=DPUMP_TTS LOGFILE=metadata.log FULL=y INCLUDE=USER,ROLE,ROLE_GRANT,PROFILE dumpfile=metadata.dmp CONTENT=METADATA_ONLY
|
||||
|
||||
-- parfile exp.par:
|
||||
dumpfile=xttdump.dmp
|
||||
directory=DPUMP_TTS
|
||||
statistics=NONE
|
||||
transport_tablespaces=TS1,TS2,TS3
|
||||
transport_full_check=y
|
||||
logfile=tts_export.log
|
||||
|
||||
-- expdp en mode "transportable tablespace"
|
||||
expdp userid=system/***** parfile=exp.par
|
||||
|
||||
-- copy dumpfiles from source to destination
|
||||
cp /mnt/yavin4/tmp/_oracle_/tmp/DATAPUMP_SOURCE_XTTCONVERT/xttdump.dmp /mnt/yavin4/tmp/_oracle_/tmp/DATAPUMP_DEST_XTTCONVERT/
|
||||
|
||||
-- on target
|
||||
------------
|
||||
-- import metadata
|
||||
impdp userid=system/secret directory=DPUMP_TTS dumpfile=metadata.dmp logfile=import_metadata.log remap_tablespace=TEMP:TMS_TEMP
|
||||
-- import "transportable tablespace"
|
||||
impdp userid=system/secret parfile=imp.par
|
||||
|
||||
|
||||
~~~~~~~~~~~~~~
|
||||
~~ Other links
|
||||
~~~~~~~~~~~~~~
|
||||
-- https://dbavivekdhiman.wordpress.com/2015/05/31/cross-platform-migration-from-aix-oracle-11-2-0-3-to-linux11-2-0-3/
|
||||
-- 11G - Reduce Transportable Tablespace Downtime using Cross Platform Incremental Backup (Doc ID 1389592.1)
|
||||
|
||||
|
||||
24
tiddlywiki/MongoDB - enable authentication using SCRAM-SHA-1.txt
Executable file
24
tiddlywiki/MongoDB - enable authentication using SCRAM-SHA-1.txt
Executable file
@@ -0,0 +1,24 @@
|
||||
-- create database for user management
|
||||
use admin
|
||||
-- create first superuser
|
||||
> db.createUser({ user: "superhero", pwd: "secret", roles: ["root"]});
|
||||
-- to list all users
|
||||
> show users
|
||||
|
||||
-- add in MongoDB configuration file ->
|
||||
security:
|
||||
authorization: 'enabled'
|
||||
<-------------------------------------
|
||||
|
||||
-- restart MongoDB
|
||||
systemctl stop mongod
|
||||
systemctl start mongod
|
||||
|
||||
-- to connect within mongo shell
|
||||
> use admin
|
||||
> db.auth('superhero', 'secret');
|
||||
|
||||
-- authentication at mongo shell connection
|
||||
mongo --host frdrpsrv4483 --username "superhero" --password "secret" --authenticationDatabase "admin"
|
||||
|
||||
|
||||
43
tiddlywiki/MongoDB - example replication.txt
Executable file
43
tiddlywiki/MongoDB - example replication.txt
Executable file
@@ -0,0 +1,43 @@
|
||||
-- create keyfile for communication between MongoDB instances
|
||||
openssl rand -base64 756 > /app/mongodb/conf/keyfile.basic
|
||||
chmod 600 /app/mongodb/conf/keyfile.basic
|
||||
|
||||
-- copy keyfile on ivera-mongo02 and ivera-mongo03
|
||||
|
||||
-- mongod.conf on ivera-mongo01
|
||||
-------------------------------
|
||||
storage:
|
||||
dbPath: "/data/mongodb/"
|
||||
journal:
|
||||
enabled: true
|
||||
wiredTiger:
|
||||
engineConfig:
|
||||
cacheSizeGB: 1
|
||||
|
||||
net:
|
||||
port: 27017
|
||||
bindIp: 127.0.0.1,ivera-mongo01,ivera-mongo01-priv
|
||||
|
||||
security:
|
||||
authorization: 'enabled'
|
||||
keyFile: /app/mongodb/conf/keyfile.basic
|
||||
|
||||
replication:
|
||||
replSetName: majrc
|
||||
oplogSizeMB: 100
|
||||
enableMajorityReadConcern: true
|
||||
|
||||
|
||||
-- similar config files on ivera-mongo02 and ivera-mongo03
|
||||
|
||||
|
||||
-- on ivera-mongo01 that will be defined as PRIMARY
|
||||
|
||||
cfg = { "_id" : "majrc", "members" : [ { "_id" : 0, "host":"ivera-mongo01-priv:27017", } ] }
|
||||
rs.initiate(cfg)
|
||||
|
||||
rs.add('ivera-mongo02-priv:27017');
|
||||
rs.add('ivera-mongo03-priv:27017');
|
||||
|
||||
rs.conf();
|
||||
rs.status();
|
||||
22
tiddlywiki/MongoDB - extrenal ressources.tid
Executable file
22
tiddlywiki/MongoDB - extrenal ressources.tid
Executable file
@@ -0,0 +1,22 @@
|
||||
created: 20200207141410929
|
||||
creator: vplesnila
|
||||
modified: 20210110091855530
|
||||
modifier: vplesnila
|
||||
tags: MongoDB
|
||||
title: MongoDB - extrenal ressources
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
|!comment |!url |
|
||||
|Guru99|https://www.guru99.com/mongodb-tutorials.html|
|
||||
|Tutorials Point|https://www.tutorialspoint.com/mongodb/index.htm|
|
||||
||http://andreiarion.github.io/TP7_MongoDB_Replication_exercices|
|
||||
||https://medium.com/codeops/how-to-setup-a-mongodb-replica-set-918f21da50ed|
|
||||
|Manual point-in-time recovery|https://www.tothenew.com/blog/mongo-point-in-time-restoration/|
|
||||
|Shard setup example|https://www.linode.com/docs/guides/build-database-clusters-with-mongodb/|
|
||||
|Shard setup example|https://www.howtoforge.com/tutorial/deploying-mongodb-sharded-cluster-on-centos-7/#three-sharding-components|
|
||||
|MongoDB Workbook|http://nicholasjohnson.com/mongo/course/workbook/|
|
||||
|MongoDB Exam Guide|https://university.mongodb.com/exam/guide|
|
||||
|Sharding data collections with MongoDB|http://vargas-solar.com/big-data-analytics/hands-on/sharding/|
|
||||
|Learn MongoDB The Hard Way|http://learnmongodbthehardway.com/|
|
||||
|
||||
|
||||
87
tiddlywiki/MongoDB - install on CentOS8.txt
Executable file
87
tiddlywiki/MongoDB - install on CentOS8.txt
Executable file
@@ -0,0 +1,87 @@
|
||||
# Linux packages
|
||||
dnf -y install wget net-snmp-agent-libs
|
||||
|
||||
groupadd mongod
|
||||
useradd mongod -g mongod -G mongod
|
||||
mkdir -p /app/mongodb
|
||||
chown -R mongod:mongod /app/mongodb
|
||||
|
||||
# disable selinux
|
||||
# in /etc/selinux/config -->
|
||||
SELINUX=disabled
|
||||
# <-------------------------
|
||||
|
||||
# Disable Transparent Huge Pages (THP) following https://docs.mongodb.com/manual/tutorial/transparent-huge-pages/
|
||||
|
||||
su - mongod
|
||||
cd /app/mongodb
|
||||
mkdir product conf data log
|
||||
cd product
|
||||
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel80-4.2.3.tgz
|
||||
gunzip -c mongodb-linux-x86_64-rhel80-4.2.3.tgz | tar -xvf -
|
||||
rm -rf mongodb-linux-x86_64-rhel80-4.2.3.tgz
|
||||
ln -s mongodb-linux-x86_64-rhel80-4.2.3 current_version
|
||||
|
||||
# create configuration file
|
||||
# /app/mongodb/conf/mongod.conf -->
|
||||
storage:
|
||||
dbPath: "/app/mongodb/data"
|
||||
journal:
|
||||
enabled: true
|
||||
|
||||
net:
|
||||
port: 27017
|
||||
bindIp: 127.0.0.1,192.168.0.127
|
||||
# <-------------------------------
|
||||
|
||||
# Test MongoDB server startup (press Ctrl-C to stop)
|
||||
/app/mongodb/product/current_version/bin/mongod --config=/app/mongodb/conf/mongod.conf --logpath=/app/mongodb/log/mongod.log
|
||||
|
||||
# Add to systemd as service
|
||||
# create /etc/systemd/system/mongod.service -->
|
||||
[Unit]
|
||||
Description=MongoDB
|
||||
After=multi-user.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
# (file size)
|
||||
LimitFSIZE=infinity
|
||||
# (cpu time)
|
||||
LimitCPU=infinity
|
||||
# (virtual memory size)
|
||||
LimitAS=infinity
|
||||
# (locked-in-memory size)
|
||||
LimitMEMLOCK=infinity
|
||||
# (open files)
|
||||
LimitNOFILE=64000
|
||||
# (processes/threads)
|
||||
LimitNPROC=64000
|
||||
|
||||
User=mongod
|
||||
Group=mongod
|
||||
|
||||
ExecStart=/app/mongodb/product/current_version/bin/mongod --config /app/mongodb/conf/mongod.conf --logpath=/app/mongodb/log/mongod.log
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
# <--------------------------------------------
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl status mongod
|
||||
systemctl start mongod
|
||||
systemctl status mongod
|
||||
systemctl stop mongod
|
||||
systemctl status mongod
|
||||
systemctl start mongod
|
||||
systemctl enable mongod
|
||||
|
||||
# check listening port
|
||||
lsof -i -P | grep -i "listen"
|
||||
|
||||
# Test mongo shell
|
||||
/app/mongodb/product/current_version/bin/mongo --host ajara
|
||||
|
||||
# In order to avoid the message [Browserslist: caniuse-lite is outdated. Please run: npx browserslist@latest --update-db] when running MongoShell:
|
||||
export BROWSERSLIST_IGNORE_OLD_DATA=1
|
||||
|
||||
63
tiddlywiki/MongoDB - point in time recovery.txt
Executable file
63
tiddlywiki/MongoDB - point in time recovery.txt
Executable file
@@ -0,0 +1,63 @@
|
||||
~~ getting min/max timestamp in oplog can be done on PRIMARY or on any SECONDARY member of a replica set
|
||||
rs.slaveOk();
|
||||
|
||||
~~ display useful oplog information
|
||||
rs.printReplicationInfo()
|
||||
|
||||
use local
|
||||
db.oplog.rs.find({}, {ts: 1,}).sort({ts: -1}).limit(1)
|
||||
db.oplog.rs.find({}, {ts: 1,}).sort({ts: 1}).limit(1)
|
||||
|
||||
~~ example
|
||||
x=Timestamp(1590072867, 1)
|
||||
>> Timestamp(1590072867, 1)
|
||||
new Date(x.t * 1000)
|
||||
>> ISODate("2020-05-21T14:54:27Z")
|
||||
|
||||
x=Timestamp(1581603867, 1)
|
||||
>> Timestamp(1581603867, 1)
|
||||
new Date(x.t * 1000)
|
||||
>> ISODate("2020-02-13T14:24:27Z")
|
||||
|
||||
~~ note that an ISODate ending in Z is a UTC date
|
||||
~~ pay attention to the difference between your local time and UTC; for example CEST=UTC+2
|
||||
|
||||
~~ example: find min/max timestamp for oplog records for the last hour
|
||||
var SECS_PER_HOUR = 3600
|
||||
var now = Math.floor((new Date().getTime()) / 1000) // seconds since epoch right now
|
||||
db.oplog.rs.find({ "ts" : { "$lt" : Timestamp(now, 1), "$gt" : Timestamp(now - SECS_PER_HOUR, 1) } }).sort({ts:-1}).limit(1);
|
||||
db.oplog.rs.find({ "ts" : { "$lt" : Timestamp(now, 1), "$gt" : Timestamp(now - SECS_PER_HOUR, 1) } }).sort({ts:1}).limit(1);
|
||||
|
||||
~~ example: list oplog records between 2 dates
|
||||
var since = Math.floor(ISODate("2020-05-21T15:43:16Z").getTime() / 1000)
|
||||
var until = Math.floor(ISODate("2020-05-21T15:43:18Z").getTime() / 1000)
|
||||
db.oplog.rs.find({ "ts" : { "$lt" : Timestamp(until, 1), "$gt" : Timestamp(since, 1) } })
|
||||
|
||||
~~ example: get the last oplog record before a date (useful for Point In Time Recovery)
|
||||
var until = Math.floor(ISODate("2020-05-22T15:08:25Z").getTime() / 1000)
|
||||
db.oplog.rs.find({ "ts" : { "$lt" : Timestamp(until, 1) } }).sort({ts:-1}).limit(1);
|
||||
|
||||
~~ the oplog is a collection; it can be dumped using the mongodump tool
|
||||
mongodump -u superhero -p secret --authenticationDatabase admin -d local -c oplog.rs -o oplogdump
|
||||
~~ the format is BSON; if you want to query it, you should convert the file to JSON format:
|
||||
cd oplogdump/local
|
||||
bsondump oplog.rs.bson > archive.json
|
||||
|
||||
~~ Point In Time Recovery example for PITR=2020-05-22 17:08:25 CEST
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~ create a new empty stand-alone MongoDB without authentication and replica set in configuration file
|
||||
-- restore the last FULL BACKUP of data before your PITR
|
||||
~~ convert CEST to UTC: PITR=2020-05-22T15:08:25Z and note it down
|
||||
|
||||
~~ find the corresponding Timestamp in oplog
|
||||
var until = Math.floor(ISODate("2020-05-22T15:08:25Z").getTime() / 1000)
|
||||
db.oplog.rs.find({ "ts" : { "$lt" : Timestamp(until, 1) } }).sort({ts:-1}).limit(1);
|
||||
~~ in my example I obtained Timestamp(1590160104, 1)
|
||||
|
||||
~~ copy the oplog.rs.bson file locally to an EMPTY folder and rename it oplog.bson
|
||||
~~ in my example, the folder is: /mnt/yavin4/tmp/_mongodb_/tmp
|
||||
~~ optionally perform a dry run in order to check your mongorestore command
|
||||
mongorestore --dryRun --oplogReplay --oplogLimit 1590160104:1 /mnt/yavin4/tmp/_mongodb_/tmp
|
||||
~~ recover until the corresponding Timestamp
|
||||
mongorestore --oplogReplay --oplogLimit 1590160104:1 /mnt/yavin4/tmp/_mongodb_/tmp
|
||||
|
||||
19
tiddlywiki/MongoDB - reconfigure replicaset examples.md
Executable file
19
tiddlywiki/MongoDB - reconfigure replicaset examples.md
Executable file
@@ -0,0 +1,19 @@
|
||||
## Change hostname
|
||||
|
||||
cfg = rs.conf()
|
||||
cfg.members[0].host = "ivera-mongo01.swgalaxy:27017"
|
||||
cfg.members[1].host = "ivera-mongo02.swgalaxy:27017"
|
||||
rs.reconfig(cfg)
|
||||
|
||||
|
||||
## Change priority
|
||||
|
||||
cfg = rs.conf()
|
||||
cfg.members[0].priority = 1
|
||||
cfg.members[1].priority = 1
|
||||
rs.reconfig(cfg)
|
||||
|
||||
## Add new member
|
||||
|
||||
rs.add('ivera-mongo03.swgalaxy:27017');
|
||||
|
||||
100
tiddlywiki/MongoDB - replication setup.txt
Executable file
100
tiddlywiki/MongoDB - replication setup.txt
Executable file
@@ -0,0 +1,100 @@
|
||||
~~ server names:
|
||||
~~ ajara
|
||||
~~ atrisia
|
||||
~~ anaxes
|
||||
|
||||
|
||||
~~ enable SCRAM authentication on ALL MongoDB instances
|
||||
mongo
|
||||
> use admin
|
||||
> db.createUser({ user: "superhero", pwd: "secret", roles: ["root"]});
|
||||
> db.shutdownServer();
|
||||
|
||||
~~ add in MongoDB configuration file ->
|
||||
security:
|
||||
authorization: 'enabled'
|
||||
<-------------------------------------
|
||||
|
||||
~~ start MongoDB instance
|
||||
/app/mongodb/product/current_version/bin/mongod --config=/app/mongodb/conf/mongod.conf --logpath=/app/mongodb/log/mongod.log --fork
|
||||
|
||||
~~ test connection
|
||||
mongo --username=superhero --password=secret
|
||||
|
||||
~~ for internal communication between instances we will use a basic keyFile method
|
||||
|
||||
~~ generate keyfile
|
||||
openssl rand -base64 756 > /app/mongodb/conf/keyfile.basic
|
||||
chmod 600 /app/mongodb/conf/keyfile.basic
|
||||
|
||||
~~ add the keyfile in MongoDB configuration file ->
|
||||
security:
|
||||
authorization: 'enabled'
|
||||
keyFile: /app/mongodb/conf/keyfile.basic
|
||||
<-------------------------------------
|
||||
|
||||
~~ restart MongoDB instance and test connection again
|
||||
/app/mongodb/product/current_version/bin/mongod --config=/app/mongodb/conf/mongod.conf --shutdown
|
||||
/app/mongodb/product/current_version/bin/mongod --config=/app/mongodb/conf/mongod.conf --logpath=/app/mongodb/log/mongod.log --fork
|
||||
|
||||
mongo --username=superhero --password=secret
|
||||
|
||||
~~ repeat these operations on the other 2 MongoDB instances using the SAME keyfile generated for the first instance
|
||||
|
||||
~~ for all MongoDB instances, declare the replication in configuration file
|
||||
|
||||
------------------------------------------>
|
||||
replication:
|
||||
replSetName: rs0
|
||||
<-----------------------------------------
|
||||
|
||||
|
||||
mongo --username=superhero --password=secret
|
||||
|
||||
rsconf = {
|
||||
_id: "rs0",
|
||||
members: [
|
||||
{
|
||||
_id: 0,
|
||||
host: "ajara:27017"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
rs.initiate(rsconf);
|
||||
|
||||
rs.add('atrisia:27017');
|
||||
rs.add('anaxes:27017');
|
||||
|
||||
rs.conf();
|
||||
rs.status();
|
||||
|
||||
|
||||
~~ check if replication works
|
||||
~~ on PRIMARY instance create a database and a collection
|
||||
rs0:PRIMARY> use db01;
|
||||
rs0:PRIMARY> db.movies.insertOne({"title" : "Stand by Me"});
|
||||
|
||||
~~ on SECONDARIES check if the collection has been replicated
|
||||
~~ note that on a SECONDARY, before running a query, we must enable read-only access using the following command
|
||||
rs0:SECONDARY> rs.slaveOk();
|
||||
|
||||
rs0:SECONDARY> use db01;
|
||||
rs0:SECONDARY> db.movies.find();
|
||||
|
||||
~~ finally, drop the test database from the master node
|
||||
rs0:PRIMARY> db.dropDatabase();
|
||||
|
||||
~~ to use on a SECONDARY replica to display lag and oplog size
|
||||
db.getReplicationInfo();
|
||||
|
||||
~~ to find the master of a replica set, use the following command on any member of the replica set
|
||||
db.isMaster();
|
||||
|
||||
~~ get replica set config
|
||||
config = rs.conf();
|
||||
|
||||
~~ remove a member from a replica set
|
||||
rs.remove('anaxes:27017');
|
||||
rs.reconfig(config, {force: true});
|
||||
35
tiddlywiki/MongoDB - scratchpad.txt
Executable file
35
tiddlywiki/MongoDB - scratchpad.txt
Executable file
@@ -0,0 +1,35 @@
|
||||
> use db01
|
||||
-- create database db01 if it does not exist
|
||||
> db
|
||||
-- show current database
|
||||
> show dbs
|
||||
-- list databases
|
||||
-- db01 is not listed until it contains at least one document
|
||||
> db.movies.insertOne({"title" : "Stand by Me"})
|
||||
|
||||
|
||||
-- create index
|
||||
> db.users.createIndex({"age" : 1, "username" : 1});
|
||||
|
||||
-- list indexes of a collection
|
||||
> db.users.getIndexes();
|
||||
|
||||
-- show explain plan
|
||||
> db.users.find({"username": "user999999", "age":"19"}).explain("executionStats");
|
||||
|
||||
# https://www.mysoftkey.com/mongodb/how-to-enable-authentication-and-authorization-using-scram-sha-1-in-mongodb/
|
||||
|
||||
-- Connection String URI Format
|
||||
https://docs.mongodb.com/manual/reference/connection-string/index.html#connections-dns-seedlist
|
||||
|
||||
-- shutdown MongoDB
|
||||
> use admin
|
||||
> db.shutdownServer();
|
||||
|
||||
-- count(*) of a collection
|
||||
db.elements.countDocuments({})
|
||||
-- truncate a collection
|
||||
db.elements.remove({})
|
||||
-- display the last 5 inserted documents of a collection
|
||||
db.elements.find().sort({_id:-1}).limit(5);
|
||||
|
||||
188
tiddlywiki/MongoDB - setup SHARD notes.txt
Executable file
188
tiddlywiki/MongoDB - setup SHARD notes.txt
Executable file
@@ -0,0 +1,188 @@
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
~~ CONFIG servers ~~
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
-- IMPORTANT: note that SECURITY is initially disabled
|
||||
|
||||
~~ example mongod.conf for CONFIG server
|
||||
----------------------------------------------->
|
||||
storage:
|
||||
dbPath: "/data/mongodb/"
|
||||
journal:
|
||||
enabled: true
|
||||
wiredTiger:
|
||||
engineConfig:
|
||||
cacheSizeGB: 1
|
||||
|
||||
net:
|
||||
port: 27017
|
||||
bindIp: 127.0.0.1,ivera-conf01,ivera-conf01-priv
|
||||
|
||||
#security:
|
||||
#authorization: 'enabled'
|
||||
#keyFile: /app/mongodb/conf/keyfile.basic
|
||||
|
||||
replication:
|
||||
replSetName: ivera_conf
|
||||
oplogSizeMB: 100
|
||||
enableMajorityReadConcern: true
|
||||
|
||||
sharding:
|
||||
clusterRole: configsvr
|
||||
<-----------------------------------------------
|
||||
|
||||
-- replication setup
|
||||
cfg = {
|
||||
_id : "ivera_conf",
|
||||
members : [ { "_id" : 0, "host":"ivera-conf01-priv:27017"},],
|
||||
configsvr: true,
|
||||
}
|
||||
rs.initiate(cfg)
|
||||
|
||||
rs.add('ivera-conf02-priv:27017');
|
||||
|
||||
rs.conf();
|
||||
rs.status();
|
||||
|
||||
-- security setup on PRIMARY
|
||||
use admin
|
||||
db.createUser({ user: "superhero", pwd: "secret", roles: ["root"]});
|
||||
|
||||
-- uncomment SECURITY lines from config file on PRIMARY/SECONDARY and restart MongoDB instances
|
||||
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
~~ DATA servers ~~
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
-- on DATA servers, the security can be implemented before or after replication setup
|
||||
|
||||
~~ example mongod.conf for DATA server
|
||||
----------------------------------------------->
|
||||
storage:
|
||||
dbPath: "/data/mongodb/"
|
||||
journal:
|
||||
enabled: true
|
||||
wiredTiger:
|
||||
engineConfig:
|
||||
cacheSizeGB: 1
|
||||
|
||||
net:
|
||||
port: 27017
|
||||
bindIp: 127.0.0.1,ivera-mongo01,ivera-mongo01-priv
|
||||
|
||||
security:
|
||||
authorization: 'enabled'
|
||||
keyFile: /app/mongodb/conf/keyfile.basic
|
||||
|
||||
replication:
|
||||
replSetName: ivera_data_01_02
|
||||
oplogSizeMB: 100
|
||||
enableMajorityReadConcern: true
|
||||
|
||||
sharding:
|
||||
clusterRole: shardsvr
|
||||
<-----------------------------------------------
|
||||
|
||||
-- replication setup
|
||||
cfg = {
|
||||
  _id : "ivera_data_01_02",
|
||||
  members : [ { "_id" : 0, "host":"ivera-mongo01-priv:27017"},],
|
||||
}
|
||||
rs.initiate(cfg)
|
||||
-- NOTE: unlike the CONFIG servers, DATA server replica sets must NOT set "configsvr: true"
|
||||
|
||||
rs.add('ivera-mongo02-priv:27017');
|
||||
|
||||
rs.conf();
|
||||
rs.status();
|
||||
|
||||
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
~~ ROUTER servers ~~
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
~~ example mongos.conf
|
||||
----------------------------------------------->
|
||||
net:
|
||||
port: 27017
|
||||
bindIp: 127.0.0.1,ivera-router01,ivera-router01-priv
|
||||
|
||||
sharding:
|
||||
configDB: "ivera_conf/ivera-conf01:27017,ivera-conf02:27017"
|
||||
|
||||
security:
|
||||
keyFile: /app/mongodb/conf/keyfile.basic
|
||||
<-----------------------------------------------
|
||||
|
||||
-- create SYSTEMD service for MongoDB Router
|
||||
-- create service unit file /etc/systemd/system/mongos.service
|
||||
----------------------------------------------->
|
||||
[Unit]
|
||||
Description=MongoDB Router
|
||||
After=multi-user.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
# (file size)
|
||||
LimitFSIZE=infinity
|
||||
# (cpu time)
|
||||
LimitCPU=infinity
|
||||
# (virtual memory size)
|
||||
LimitAS=infinity
|
||||
# (locked-in-memory size)
|
||||
LimitMEMLOCK=infinity
|
||||
# (open files)
|
||||
LimitNOFILE=64000
|
||||
# (processes/threads)
|
||||
LimitNPROC=64000
|
||||
|
||||
User=mongod
|
||||
Group=mongod
|
||||
|
||||
ExecStart=/app/mongodb/product/server/current_version/bin/mongos --config=/app/mongodb/conf/mongos.conf --logpath=/app/mongodb/log/mongos.log
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
<-----------------------------------------------
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl start mongos
|
||||
systemctl status mongos
|
||||
systemctl enable mongos
|
||||
|
||||
-- connect to MongoDB Router in authenticated mode and add shards
|
||||
mongo --username "superhero" --password "******"
|
||||
|
||||
sh.addShard( "ivera_data_01_02/ivera-mongo01-priv:27017")
|
||||
sh.addShard( "ivera_data_01_02/ivera-mongo02-priv:27017")
|
||||
sh.addShard( "ivera_data_03_04/ivera-mongo03-priv:27017")
|
||||
sh.addShard( "ivera_data_03_04/ivera-mongo04-priv:27017")
|
||||
sh.addShard( "ivera_data_05_06/ivera-mongo05-priv:27017")
|
||||
sh.addShard( "ivera_data_05_06/ivera-mongo06-priv:27017")
|
||||
|
||||
-- NOTE: a MongoDB router doesn't have any data locally -- except the mongos.conf file
|
||||
-- We can create multiple MongoDB routers and use a load balancer to redirect user's calls
|
||||
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
~~ Test Cluster ~~
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
-- create a database and activate sharding at the database level
|
||||
use exampleDB
|
||||
sh.enableSharding("exampleDB")
|
||||
|
||||
-- check database sharding
|
||||
use config
|
||||
db.databases.find()
|
||||
|
||||
-- create a collection and hash value of _id
|
||||
db.exampleCollection.ensureIndex( { _id : "hashed" } )
|
||||
|
||||
-- shard the collection
|
||||
sh.shardCollection( "exampleDB.exampleCollection", { "_id" : "hashed" } )
|
||||
|
||||
-- insert documents
|
||||
for (var i = 1; i <= 500; i++) db.exampleCollection.insert( { x : i } )
|
||||
|
||||
-- display collection documents distributions across shards
|
||||
db.exampleCollection.getShardDistribution()
|
||||
8
tiddlywiki/MongoDB.tid
Executable file
8
tiddlywiki/MongoDB.tid
Executable file
@@ -0,0 +1,8 @@
|
||||
color: #ff8000
|
||||
created: 20200128094934994
|
||||
creator: vplesnila
|
||||
modified: 20200128095150176
|
||||
modifier: vplesnila
|
||||
tags: Contents
|
||||
title: MongoDB
|
||||
type: text/vnd.tiddlywiki
|
||||
185
tiddlywiki/My Oracle Toolbox.md
Executable file
185
tiddlywiki/My Oracle Toolbox.md
Executable file
@@ -0,0 +1,185 @@
|
||||
Performance
|
||||
===========
|
||||
|
||||
Displaying daily top SQL for last 7 days:
|
||||
|
||||
@exadata/mon_topsql.sql
|
||||
|
||||
ASH
|
||||
---
|
||||
|
||||
ASH Report for SQL ID:
|
||||
|
||||
@ash/sqlid_activity.sql
|
||||
|
||||
|
||||
AWR
|
||||
---
|
||||
|
||||
Display Execution plan history from AWR:
|
||||
|
||||
@awr_xplan <sql_id> <PHV> <days> <where_condition>
|
||||
Example:
|
||||
@awr_xplan h6hYfr4esZrz % 14 "and 1=1"
|
||||
|
||||
Display SQL Text from AWR:
|
||||
|
||||
@awr/awr_sqlid <sql_id>
|
||||
|
||||
SQL Monitor
|
||||
-----------
|
||||
|
||||
List SQL Monitor reports based on a where clause:
|
||||
|
||||
@sqlmon_lsrep.sql <condition> <order by>
|
||||
# @sqlmon_lsrep "x.session_id='303'"
|
||||
# @sqlmon_lsrep "x.sql_id='g9n768y28mu9m'"
|
||||
# @sqlmon_lsrep "x.sql_id='g9n768y28mu9m'" 6 asc
|
||||
|
||||
SQL Monitor report detail:
|
||||
|
||||
@sqlmon_detrep <report_id> <type>
|
||||
# @sqlmon_detrep 172
|
||||
# @sqlmon_detrep 172 html
|
||||
# @sqlmon_detrep 172 active
|
||||
|
||||
Run DBMS_SQLTUNE.REPORT_SQL_MONITOR (text mode) for session:
|
||||
|
||||
@xp <session_id>
|
||||
|
||||
Run custom DBMS_SQLTUNE.REPORT_SQL_MONITOR:
|
||||
|
||||
@xprof <report_level> <type> <sql_id|session_id> <sql_id|sid>
|
||||
# Protect sql_id as in example: @xprof BASIC TEXT sql_id "'a4fqzw4mszwck'"
|
||||
|
||||
Explain plan
|
||||
------------
|
||||
|
||||
Display execution plan for last statement for this session from library cache:
|
||||
|
||||
@x.sql
|
||||
@xb.sql
|
||||
|
||||
Plan for library cache:
|
||||
|
||||
@xi <sql_id> <child#>
|
||||
@xbi <sql_id> <child#>
|
||||
|
||||
Plan for AWR:
|
||||
|
||||
@xawr.sql <sql_id> <child#>
|
||||
|
||||
Statistics
|
||||
----------
|
||||
Column stats details:
|
||||
|
||||
@stats_col <OWNER> <TABLE-NAME> % % % %
|
||||
|
||||
History of the optimizer statistic operations.
|
||||
Optionally filters on the start time in the format DD/MM/YYYY and the target name (which supports wildcards)
|
||||
|
||||
@list_optstat_history.sql <START_DATE> <TARGET>
|
||||
|
||||
List STATS operations:
|
||||
|
||||
@stats_opls <since> <until> <level> <format>
|
||||
# @stats_opls sysdate-14 sysdate BASIC TEXT
|
||||
# @stats_opls "timestamp'2023-01-12 14:00:00'" "timestamp'2023-02-12 14:00:00'" TYPICAL HTML
|
||||
|
||||
Detail of a STATS operation:
|
||||
|
||||
@stats_opdet <operation_id> <level> <format>
|
||||
# @stats_opdet 1482
|
||||
# @stats_opdet 1482 TYPICAL HTML
|
||||
|
||||
|
||||
Trace activation
|
||||
----------------
|
||||
|
||||
Display current trace file name:
|
||||
|
||||
@t
|
||||
|
||||
Activate/deactivate 10046 trace:
|
||||
|
||||
@46on <level>
|
||||
@46off
|
||||
|
||||
|
||||
Divers
|
||||
------
|
||||
|
||||
Display SQL_ID and PHV for the last SQL:
|
||||
|
||||
@hash
|
||||
|
||||
Display SQL hint:
|
||||
|
||||
@hint <HINT>
|
||||
|
||||
|
||||
Database layout
|
||||
===============
|
||||
|
||||
Tablespaces
|
||||
-----------
|
||||
|
||||
@tbs %
|
||||
|
||||
Redolog
|
||||
-------
|
||||
|
||||
Redolog informations
|
||||
|
||||
@redolog
|
||||
|
||||
Redolog switch history
|
||||
|
||||
@perf_log_switch_history_count_daily_all.sql
|
||||
|
||||
Oracle Directories
|
||||
|
||||
@dba_directories
|
||||
|
||||
Database links
|
||||
|
||||
@dblinks.sql
|
||||
|
||||
Table informations:
|
||||
|
||||
@dba_table_info
|
||||
@tab <owner>.<table_name>
|
||||
@tab_details <owner> <table_name>
|
||||
|
||||
Partition informations:
|
||||
|
||||
@part_info.sql
|
||||
@tabpart <owner>.<table_name>
|
||||
|
||||
@tab_parts_summary <owner> <table_name>
|
||||
@tab_parts <owner> <table_name>
|
||||
|
||||
Restore points:
|
||||
|
||||
@restore_points
|
||||
|
||||
Locks
|
||||
=====
|
||||
|
||||
Blocking locks tree RAC aware:
|
||||
|
||||
@raclock
|
||||
|
||||
Blocking Locks in the databases:
|
||||
|
||||
@locks_blocking.sql
|
||||
@locks_blocking2.sql
|
||||
|
||||
Undo
|
||||
====
|
||||
|
||||
Active undo segments and the sessions that are using them:
|
||||
|
||||
@undo_users.sql
|
||||
|
||||
|
||||
9
tiddlywiki/My private cloud - NAS share on Linux.txt
Executable file
9
tiddlywiki/My private cloud - NAS share on Linux.txt
Executable file
@@ -0,0 +1,9 @@
|
||||
yum install -y cifs-utils.x86_64
|
||||
mkdir -p /mnt/yavin4
|
||||
echo "//192.168.0.9/share /mnt/yavin4 cifs vers=2.0,uid=smbuser,gid=smbuser,file_mode=0775,dir_mode=0775,credentials=/root/.smbcred 0 0" >> /etc/fstab
|
||||
groupadd smbuser
|
||||
useradd smbuser -G smbuser -g smbuser
|
||||
echo "username=vpl" > /root/.smbcred
|
||||
echo "password=*****" >> /root/.smbcred
|
||||
mount -a
|
||||
df -h
|
||||
6
tiddlywiki/Network interface SPEED check.txt
Executable file
6
tiddlywiki/Network interface SPEED check.txt
Executable file
@@ -0,0 +1,6 @@
|
||||
-- for standard interface
|
||||
ethtool eth0
|
||||
|
||||
-- for infiniband
|
||||
ibstatus
|
||||
|
||||
60
tiddlywiki/Oracle - SQL Quarantine - example.md
Executable file
60
tiddlywiki/Oracle - SQL Quarantine - example.md
Executable file
@@ -0,0 +1,60 @@
|
||||
> [Original article](https://oracle-base.com/articles/19c/sql-quarantine-19c)
|
||||
|
||||
|
||||
|
||||
We can manually quarantine a statement based on SQL_ID or SQL_TEXT.
|
||||
Both methods accept a PLAN_HASH_VALUE parameter, which allows us to quarantine a single execution plan.
|
||||
If this is not specified, all execution plans for the statement are quarantined.
|
||||
|
||||
|
||||
-- Quarantine all execution plans for a SQL_ID.
|
||||
DECLARE
|
||||
l_sql_quarantine VARCHAR2(100);
|
||||
BEGIN
|
||||
l_sql_quarantine := sys.DBMS_SQLQ.create_quarantine_by_sql_id(
|
||||
sql_id => 'gs59hr0xtjrf8'
|
||||
);
|
||||
DBMS_OUTPUT.put_line('l_sql_quarantine=' || l_sql_quarantine);
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
SQL quarantine display:
|
||||
|
||||
set lines 256
|
||||
COLUMN sql_text FORMAT A50 TRUNC
|
||||
COLUMN plan_hash_value FORMAT 999999999999
|
||||
COLUMN name FORMAT A30
|
||||
COLUMN enabled FORMAT A3 HEAD "Ena"
|
||||
COLUMN cpu_time FORMAT A10
|
||||
COLUMN io_megabytes FORMAT A10
|
||||
COLUMN io_requests FORMAT A10
|
||||
COLUMN elapsed_time FORMAT A10
|
||||
COLUMN io_logical FORMAT A10
|
||||
|
||||
select
|
||||
name, enabled,cpu_time, io_megabytes, io_requests, elapsed_time, io_logical, plan_hash_value, sql_text
|
||||
from
|
||||
dba_sql_quarantine;
|
||||
|
||||
|
||||
The ALTER_QUARANTINE procedure allows us to alter the thresholds, to make them look more like automatically generated quarantines.
|
||||
We can use the procedure to alter the following parameters:
|
||||
|
||||
- CPU_TIME
|
||||
- ELAPSED_TIME
|
||||
- IO_MEGABYTES
|
||||
- IO_REQUESTS
|
||||
- IO_LOGICAL
|
||||
- ENABLED
|
||||
- AUTOPURGE
|
||||
|
||||
Example of setting the CPU_TIME threshold for the manually created quarantines:
|
||||
|
||||
BEGIN
|
||||
DBMS_SQLQ.alter_quarantine(
|
||||
quarantine_name => 'SQL_QUARANTINE_8zpc9pwdmb8vr',
|
||||
parameter_name => 'CPU_TIME',
|
||||
parameter_value => '1');
|
||||
END;
|
||||
/
|
||||
5
tiddlywiki/Oracle - external links.md
Executable file
5
tiddlywiki/Oracle - external links.md
Executable file
@@ -0,0 +1,5 @@
|
||||
- [Oracle 12.2 Cool New Features](https://gotodba.com/2016/09/22/oracle-12-2-cool-new-features/)
|
||||
- [Upgrade to 19(oracle-base)](https://oracle-base.com/articles/19c/upgrading-to-19c)
|
||||
- [Restoring a database without having any controlfile backup](https://blog.dbi-services.com/restoring-a-database-without-having-any-controlfile-backup/)
|
||||
- [What is the Oracle ASH time waited column?](https://blog.orapub.com/20150827/what-is-the-oracle-ash-time-waited-column.html)
|
||||
- [Data Pump API for PL/SQL (DBMS_DATAPUMP)](https://oracle-base.com/articles/misc/data-pump-api)
|
||||
9
tiddlywiki/Oracle 1.tid
Executable file
9
tiddlywiki/Oracle 1.tid
Executable file
@@ -0,0 +1,9 @@
|
||||
color: #800080
|
||||
created: 20190622084111439
|
||||
creator: vplesnila
|
||||
modified: 20190622233226722
|
||||
modifier: vplesnila
|
||||
tags: Contents
|
||||
title: Oracle
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
60
tiddlywiki/Oracle 19c manual CDB creation.md
Executable file
60
tiddlywiki/Oracle 19c manual CDB creation.md
Executable file
@@ -0,0 +1,60 @@
|
||||
initASTYPRD.ora:
|
||||
|
||||
db_name=ASTY
|
||||
instance_name=ASTYPRD
|
||||
db_unique_name=ASTYPRD
|
||||
compatible=19.0.0.0.0
|
||||
control_files=(/data/ASTYPRD/control01.ctl)
|
||||
db_create_file_dest=/data
|
||||
db_create_online_log_dest_1=/data
|
||||
db_recovery_file_dest_size=4G
|
||||
db_recovery_file_dest=/fra
|
||||
log_archive_dest_1='location=USE_DB_RECOVERY_FILE_DEST'
|
||||
log_archive_format=%t_%s_%r.arc
|
||||
db_block_size=8192
|
||||
open_cursors=300
|
||||
diagnostic_dest=/app/oracle/base/admin/ASTYPRD
|
||||
sga_max_size=3G
|
||||
sga_target=3G
|
||||
pga_aggregate_target=512M
|
||||
pga_aggregate_limit=2G
|
||||
processes=350
|
||||
audit_file_dest=/app/oracle/base/admin/ASTYPRD/adump
|
||||
audit_trail=db
|
||||
remote_login_passwordfile=exclusive
|
||||
undo_tablespace=TS_UNDO
|
||||
enable_pluggable_database=TRUE
|
||||
|
||||
|
||||
Create database:
|
||||
|
||||
spool createdb.log
|
||||
|
||||
create database ASTY
|
||||
datafile size 700M autoextend on next 64M
|
||||
extent management local
|
||||
SYSAUX datafile size 512M autoextend on next 64M
|
||||
default temporary tablespace TS_TEMP tempfile size 256M autoextend off
|
||||
undo tablespace TS_UNDO datafile size 256M autoextend off
|
||||
character set AL32UTF8
|
||||
national character set AL16UTF16
|
||||
logfile group 1 size 64M,
|
||||
group 2 size 64M
|
||||
user SYS identified by secret user SYSTEM identified by secret
|
||||
enable pluggable database;
|
||||
|
||||
create tablespace USERS datafile size 32M autoextend ON next 32M;
|
||||
alter database default tablespace USERS;
|
||||
|
||||
spool off
|
||||
|
||||
|
||||
|
||||
Ensure using Oracle provided perl:
|
||||
|
||||
export PATH=$ORACLE_HOME/perl/bin:$PATH
|
||||
|
||||
Run `catcdb.sql` providing the required information:
|
||||
|
||||
@?/rdbms/admin/catcdb.sql
|
||||
|
||||
16
tiddlywiki/Oracle RAC - create network and add listener.txt
Executable file
16
tiddlywiki/Oracle RAC - create network and add listener.txt
Executable file
@@ -0,0 +1,16 @@
|
||||
# list interface usage
|
||||
oifcfg getif
|
||||
# list existing networks
|
||||
srvctl config network
|
||||
|
||||
# vortex-db01-dba-vip: 192.168.3.88
|
||||
# vortex-db02-dba-vip: 192.168.3.90
|
||||
# as ROOT user
|
||||
srvctl add network -netnum 2 -subnet 192.168.3.0/255.255.255.0/eth3 -nettype STATIC
|
||||
srvctl add vip -node vortex-db01 -address vortex-db01-dba-vip/255.255.255.0/eth3 -netnum 2
|
||||
srvctl add vip -node vortex-db02 -address vortex-db02-dba-vip/255.255.255.0/eth3 -netnum 2
|
||||
srvctl start vip -vip vortex-db01-dba-vip
|
||||
srvctl start vip -vip vortex-db02-dba-vip
|
||||
# as GRID user
|
||||
srvctl add listener -listener LISTENER_DG -netnum 2 -endpoints TCP:1600
|
||||
srvctl start listener -listener LISTENER_DG
|
||||
40
tiddlywiki/Oracle RAC - divers.txt
Executable file
40
tiddlywiki/Oracle RAC - divers.txt
Executable file
@@ -0,0 +1,40 @@
|
||||
-- verify software integrity
|
||||
cluvfy comp software -n all -verbose
|
||||
|
||||
-- MGMTDB creation
|
||||
------------------
|
||||
$GRID_HOME/bin/dbca -silent -createDatabase -templateName MGMTSeed_Database.dbc -sid -MGMTDB -gdbName _mgmtdb -storageType ASM -diskGroupName +DATA -datafileJarLocation $GRID_HOME/assistants/dbca/templates -characterset AL32UTF8 -autoGeneratePasswords -oui_internal
|
||||
|
||||
|
||||
-- Wallet creation for patching
|
||||
-------------------------------
|
||||
cd /app/grid/product/12cR2/grid_1/OPatch/auto/core/bin
|
||||
./patchingWallet.sh -walletDir /home/grid -create grid:theron-db01:ssh grid:theron-db02:ssh root:theron-db01:ssh root:theron-db02:ssh -log /home/grid/wallet.log
|
||||
|
||||
|
||||
cd /app/oracle/product/12cR2/db_1/OPatch/auto/core/bin
|
||||
./patchingWallet.sh -walletDir /home/oracle -create oracle:theron-db01:ssh oracle:theron-db02:ssh root:theron-db01:ssh root:theron-db02:ssh -log /home/oracle/wallet.log
|
||||
|
||||
|
||||
-- Patch apply with opatchauto
|
||||
------------------------------
|
||||
/app/grid/product/12cR2/grid_1/OPatch/opatchauto apply /home/grid/tmp/26610291 -oh /app/grid/product/12cR2/grid_1 -wallet /home/grid
|
||||
|
||||
-- Install the latest version of OPatch
|
||||
------------------------------------
|
||||
|
||||
As root user, after uncompressing the downloaded copy of the latest OPatch under /mnt/yavin4/tmp/0/01/OPatch
|
||||
|
||||
cd /app/grid/product/12cR2/grid_1/
|
||||
OPatch/opatchauto version
|
||||
rm -rf OPatch/
|
||||
cp -R /mnt/yavin4/tmp/0/01/OPatch .
|
||||
chown -R grid:oinstall OPatch
|
||||
OPatch/opatchauto version
|
||||
|
||||
cd /app/oracle/product/12cR2/db_1/
|
||||
OPatch/opatchauto version
|
||||
rm -rf OPatch/
|
||||
cp -R /mnt/yavin4/tmp/0/01/OPatch .
|
||||
chown -R oracle:oinstall OPatch
|
||||
OPatch/opatchauto version
|
||||
26
tiddlywiki/Oracle RAC os users setup.txt
Executable file
26
tiddlywiki/Oracle RAC os users setup.txt
Executable file
@@ -0,0 +1,26 @@
|
||||
# https://docs.oracle.com/en/database/oracle/oracle-database/19/cwlin/identifying-an-oracle-software-owner-user-account.html#GUID-0A95F4B1-1045-455D-9897-A23012E4E27F
|
||||
|
||||
$ grep "oinstall" /etc/group
|
||||
oinstall:x:54321:grid,oracle
|
||||
|
||||
$ id oracle
|
||||
uid=54321(oracle) gid=54321(oinstall) groups=54321(oinstall),54322(dba),
|
||||
54323(oper),54324(backupdba),54325(dgdba),54326(kmdba),54327(asmdba),54330(racdba)
|
||||
|
||||
|
||||
$ id grid
|
||||
uid=54331(grid) gid=54321(oinstall) groups=54321(oinstall),54322(dba),
|
||||
54327(asmdba),54328(asmoper),54329(asmadmin),54330(racdba)
|
||||
|
||||
# extract from /etc/group
|
||||
|
||||
oinstall:x:54321:
|
||||
dba:x:54322:oracle,grid
|
||||
oper:x:54323:oracle
|
||||
backupdba:x:54324:oracle
|
||||
dgdba:x:54325:oracle
|
||||
kmdba:x:54326:oracle
|
||||
racdba:x:54330:oracle,grid
|
||||
asmoper:x:54327:grid
|
||||
asmdba:x:54328:grid,oracle
|
||||
asmadmin:x:54329:grid
|
||||
211
tiddlywiki/Oracle SSL connection.md
Executable file
211
tiddlywiki/Oracle SSL connection.md
Executable file
@@ -0,0 +1,211 @@
|
||||
## Source
|
||||
|
||||
- https://oracle-base.com/articles/misc/configure-tcpip-with-ssl-and-tls-for-database-connections
|
||||
|
||||
|
||||
## Folder creation for configuration files
|
||||
|
||||
|
||||
mkdir -p /mnt/yavin4/tmp/_oracle_/labo_ssl/server/wallet
|
||||
mkdir -p /mnt/yavin4/tmp/_oracle_/labo_ssl/client/wallet
|
||||
mkdir -p /mnt/yavin4/tmp/_oracle_/labo_ssl/client/tnsadmin
|
||||
mkdir -p /mnt/yavin4/tmp/_oracle_/labo_ssl/exchange_zone/
|
||||
|
||||
## Server wallet and certificate
|
||||
|
||||
Create the wallet:
|
||||
|
||||
orapki wallet create -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/server/wallet" -pwd "C0mpl1cated#Ph|rase" -auto_login_local
|
||||
|
||||
Create certificate in wallet:
|
||||
|
||||
orapki wallet add -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/server/wallet" -pwd "C0mpl1cated#Ph|rase" \
|
||||
-dn "CN=`hostname`" -keysize 1024 -self_signed -validity 3650
|
||||
|
||||
Display wallet contents:
|
||||
|
||||
orapki wallet display -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/server/wallet" -pwd "C0mpl1cated#Ph|rase"
|
||||
|
||||
Export certificate:
|
||||
|
||||
orapki wallet export -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/server/wallet" -pwd "C0mpl1cated#Ph|rase" \
|
||||
-dn "CN=`hostname`" -cert /mnt/yavin4/tmp/_oracle_/labo_ssl/exchange_zone/`hostname`-certificate.crt
|
||||
|
||||
|
||||
## Client wallet and certificate
|
||||
|
||||
Create the wallet:
|
||||
|
||||
orapki wallet create -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/client/wallet" -pwd "1m#the|Client#" -auto_login_local
|
||||
|
||||
Create certificate in wallet:
|
||||
|
||||
orapki wallet add -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/client/wallet" -pwd "1m#the|Client#" \
|
||||
-dn "CN=`hostname`" -keysize 1024 -self_signed -validity 3650
|
||||
|
||||
Display wallet contents:
|
||||
|
||||
orapki wallet display -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/client/wallet" -pwd "1m#the|Client#"
|
||||
|
||||
Export certificate:
|
||||
|
||||
orapki wallet export -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/client/wallet" -pwd "1m#the|Client#" \
|
||||
-dn "CN=`hostname`" -cert /mnt/yavin4/tmp/_oracle_/labo_ssl/exchange_zone/`hostname`-certificate.crt
|
||||
|
||||
|
||||
## Exchange certificates between server and client
|
||||
|
||||
Load client certificate into server wallet:
|
||||
|
||||
orapki wallet add -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/server/wallet" -pwd "C0mpl1cated#Ph|rase" \
|
||||
-trusted_cert -cert /mnt/yavin4/tmp/_oracle_/labo_ssl/exchange_zone/taris.swgalaxy-certificate.crt
|
||||
|
||||
|
||||
Display server wallet contents:
|
||||
|
||||
orapki wallet display -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/server/wallet" -pwd "C0mpl1cated#Ph|rase"
|
||||
|
||||
|
||||
Load server certificate into client wallet:
|
||||
|
||||
orapki wallet add -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/client/wallet" -pwd "1m#the|Client#" \
|
||||
-trusted_cert -cert /mnt/yavin4/tmp/_oracle_/labo_ssl/exchange_zone/mandalore.swgalaxy-certificate.crt
|
||||
|
||||
|
||||
Display client wallet contents:
|
||||
|
||||
orapki wallet display -wallet "/mnt/yavin4/tmp/_oracle_/labo_ssl/client/wallet" -pwd "1m#the|Client#"
|
||||
|
||||
|
||||
## Server network configuration
|
||||
|
||||
> I did not succeed in using a custom `$TNS_ADMIN` location for server configuration files
|
||||
|
||||
> In this example we will register the database on standard `LISTENER` and on custom `LISTENER_APP` listeners
|
||||
|
||||
Edit `$ORACLE_HOME/network/admin/sqlnet.ora`:
|
||||
|
||||
WALLET_LOCATION =
|
||||
(SOURCE =
|
||||
(METHOD = FILE)
|
||||
(METHOD_DATA =
|
||||
(DIRECTORY = /mnt/yavin4/tmp/_oracle_/labo_ssl/server/wallet)
|
||||
)
|
||||
)
|
||||
SQLNET.AUTHENTICATION_SERVICES = (TCPS,NTS,BEQ)
|
||||
SSL_CLIENT_AUTHENTICATION = FALSE
|
||||
SSL_CIPHER_SUITES = (SSL_RSA_WITH_AES_256_CBC_SHA, SSL_RSA_WITH_3DES_EDE_CBC_SHA)
|
||||
|
||||
|
||||
Edit `$ORACLE_HOME/network/admin/listener.ora`:
|
||||
|
||||
|
||||
SSL_CLIENT_AUTHENTICATION = FALSE
|
||||
WALLET_LOCATION =
|
||||
(SOURCE =
|
||||
(METHOD = FILE)
|
||||
(METHOD_DATA =
|
||||
(DIRECTORY = /mnt/yavin4/tmp/_oracle_/labo_ssl/server/wallet)
|
||||
)
|
||||
)
|
||||
LISTENER_APP =
|
||||
(DESCRIPTION_LIST =
|
||||
(DESCRIPTION =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = mandalore.swgalaxy)(PORT = 12000))
|
||||
(ADDRESS = (PROTOCOL = TCPS)(HOST = mandalore.swgalaxy)(PORT = 24000))
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
Edit `$ORACLE_HOME/network/admin/tnsnames.ora`:
|
||||
|
||||
LOCAL_LISTENER =
|
||||
(DESCRIPTION_LIST =
|
||||
(DESCRIPTION =
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = mandalore.swgalaxy)(PORT = 1521))
|
||||
(ADDRESS = (PROTOCOL = TCP)(HOST = mandalore.swgalaxy)(PORT = 12000))
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
Set `local_listener` at the database level:
|
||||
|
||||
alter system set local_listener='LOCAL_LISTENER' scope=memory sid='*';
|
||||
alter system register;
|
||||
|
||||
|
||||
## Client network configuration
|
||||
|
||||
export TNS_ADMIN=/mnt/yavin4/tmp/_oracle_/labo_ssl/client/tnsadmin
|
||||
|
||||
|
||||
Edit `$TNS_ADMIN/sqlnet.ora`:
|
||||
|
||||
WALLET_LOCATION =
|
||||
(SOURCE =
|
||||
(METHOD = FILE)
|
||||
(METHOD_DATA =
|
||||
(DIRECTORY = /mnt/yavin4/tmp/_oracle_/labo_ssl/client/wallet)
|
||||
)
|
||||
)
|
||||
|
||||
SQLNET.AUTHENTICATION_SERVICES = (TCPS,NTS)
|
||||
SSL_CLIENT_AUTHENTICATION = FALSE
|
||||
SSL_CIPHER_SUITES = (SSL_RSA_WITH_AES_256_CBC_SHA, SSL_RSA_WITH_3DES_EDE_CBC_SHA)
|
||||
|
||||
|
||||
Edit `$TNS_ADMIN/tnsnames.ora`:
|
||||
|
||||
EWOKPRD_APP_SSL=
|
||||
(DESCRIPTION=
|
||||
(ADDRESS=
|
||||
(PROTOCOL=TCPS)(HOST=mandalore.swgalaxy)(PORT=24000)
|
||||
)
|
||||
(CONNECT_DATA=
|
||||
(SERVICE_NAME=EWOKPRD)
|
||||
)
|
||||
)
|
||||
|
||||
EWOKPRD_STANDARD=
|
||||
(DESCRIPTION=
|
||||
(ADDRESS=
|
||||
(PROTOCOL=TCP)(HOST=mandalore.swgalaxy)(PORT=1521)
|
||||
)
|
||||
(CONNECT_DATA=
|
||||
(SERVICE_NAME=EWOKPRD)
|
||||
)
|
||||
)
|
||||
|
||||
EWOKPRD_APP=
|
||||
(DESCRIPTION=
|
||||
(ADDRESS=
|
||||
(PROTOCOL=TCP)(HOST=mandalore.swgalaxy)(PORT=12000)
|
||||
)
|
||||
(CONNECT_DATA=
|
||||
(SERVICE_NAME=EWOKPRD)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
|
||||
Test connections:
|
||||
|
||||
connect system/*****@EWOKPRD_APP_SSL
|
||||
connect system/*****@EWOKPRD_APP
|
||||
connect system/*****@EWOKPRD_STANDARD
|
||||
|
||||
|
||||
Get the current protocol for your session:
|
||||
|
||||
select SYS_CONTEXT('USERENV','NETWORK_PROTOCOL') from dual;
|
||||
|
||||
|
||||
Use the following query to display the current network options for your session:
|
||||
|
||||
select NETWORK_SERVICE_BANNER
|
||||
from v$session_connect_info
|
||||
where SID = sys_context('USERENV','SID');
|
||||
|
||||
- If you get a row with NETWORK_SERVICE_BANNER like '%TCP/IP%', then you use TCP (without SSL)
|
||||
- If you get a row with NETWORK_SERVICE_BANNER like '%BEQUEATH%', then you use Bequeath (LOCAL=YES)
|
||||
- If you get a row with NETWORK_SERVICE_BANNER is null, then you use TCPS
|
||||
7
tiddlywiki/Oracle Toolbox Example.tid
Executable file
7
tiddlywiki/Oracle Toolbox Example.tid
Executable file
@@ -0,0 +1,7 @@
|
||||
created: 20200831160216240
|
||||
creator: vplesnila
|
||||
modified: 20200831160219460
|
||||
modifier: vplesnila
|
||||
tags: Oracle
|
||||
title: Oracle Toolbox Example
|
||||
type: text/vnd.tiddlywiki
|
||||
119
tiddlywiki/Oracle resource manager example.md
Executable file
119
tiddlywiki/Oracle resource manager example.md
Executable file
@@ -0,0 +1,119 @@
|
||||
> [Original article](https://oracle-base.com/articles/8i/resource-manager-8i)
|
||||
|
||||
Create application users:
|
||||
|
||||
create user web_user identified by "iN_j8sC#d!kX6b:_";
|
||||
create user batch_user identified by "r~65ktuFYyds+P_X";
|
||||
grant connect,resource to web_user;
|
||||
grant connect,resource to batch_user;
|
||||
|
||||
|
||||
Create a pending area:
|
||||
|
||||
BEGIN
|
||||
DBMS_RESOURCE_MANAGER.clear_pending_area;
|
||||
DBMS_RESOURCE_MANAGER.create_pending_area;
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
Create a plan:
|
||||
|
||||
BEGIN
|
||||
DBMS_RESOURCE_MANAGER.create_plan(
|
||||
plan => 'hybrid_plan',
|
||||
comment => 'Plan for a combination of high and low priority tasks.');
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
Create a web and a batch consumer group:
|
||||
|
||||
BEGIN
|
||||
DBMS_RESOURCE_MANAGER.create_consumer_group(
|
||||
consumer_group => 'WEB_CG',
|
||||
comment => 'Web based OTLP processing - high priority');
|
||||
|
||||
DBMS_RESOURCE_MANAGER.create_consumer_group(
|
||||
consumer_group => 'BATCH_CG',
|
||||
comment => 'Batch processing - low priority');
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
Assign the consumer groups to the plan and indicate their relative priority, remembering to add the OTHER_GROUPS plan directive:
|
||||
|
||||
|
||||
BEGIN
|
||||
DBMS_RESOURCE_MANAGER.create_plan_directive (
|
||||
plan => 'hybrid_plan',
|
||||
group_or_subplan => 'web_cg',
|
||||
comment => 'High Priority',
|
||||
cpu_p1 => 80,
|
||||
cpu_p2 => 0,
|
||||
parallel_degree_limit_p1 => 4);
|
||||
|
||||
DBMS_RESOURCE_MANAGER.create_plan_directive (
|
||||
plan => 'hybrid_plan',
|
||||
group_or_subplan => 'batch_cg',
|
||||
comment => 'Low Priority',
|
||||
cpu_p1 => 0,
|
||||
cpu_p2 => 80,
|
||||
parallel_degree_limit_p1 => 4);
|
||||
|
||||
DBMS_RESOURCE_MANAGER.create_plan_directive(
|
||||
plan => 'hybrid_plan',
|
||||
group_or_subplan => 'OTHER_GROUPS',
|
||||
comment => 'all other users - level 3',
|
||||
cpu_p1 => 0,
|
||||
cpu_p2 => 0,
|
||||
cpu_p3 => 100);
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
Validate and apply the resource plan:
|
||||
|
||||
BEGIN
|
||||
DBMS_RESOURCE_MANAGER.validate_pending_area;
|
||||
DBMS_RESOURCE_MANAGER.submit_pending_area;
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
Assign our users to individual consumer groups:
|
||||
|
||||
BEGIN
|
||||
-- Assign users to consumer groups
|
||||
DBMS_RESOURCE_MANAGER_PRIVS.grant_switch_consumer_group(
|
||||
grantee_name => 'web_user',
|
||||
consumer_group => 'web_cg',
|
||||
grant_option => FALSE);
|
||||
|
||||
DBMS_RESOURCE_MANAGER_PRIVS.grant_switch_consumer_group(
|
||||
grantee_name => 'batch_user',
|
||||
consumer_group => 'batch_cg',
|
||||
grant_option => FALSE);
|
||||
|
||||
DBMS_RESOURCE_MANAGER.set_initial_consumer_group('web_user', 'web_cg');
|
||||
|
||||
DBMS_RESOURCE_MANAGER.set_initial_consumer_group('batch_user', 'batch_cg');
|
||||
END;
|
||||
/
|
||||
|
||||
|
||||
Connect users:
|
||||
|
||||
connect web_user/"iN_j8sC#d!kX6b:_"
|
||||
connect batch_user/"r~65ktuFYyds+P_X"
|
||||
|
||||
Check `resource_consumer_group` column in `v$session`:
|
||||
|
||||
SELECT username, resource_consumer_group
|
||||
FROM v$session
|
||||
WHERE username IN ('WEB_USER','BATCH_USER');
|
||||
|
||||
Note that the value changes for a connected session if `RESOURCE_MANAGER_PLAN` changes at the instance level:
|
||||
|
||||
alter system set RESOURCE_MANAGER_PLAN = 'hybrid_plan' scope=both sid='*';
|
||||
alter system set RESOURCE_MANAGER_PLAN = '' scope=both sid='*';
|
||||
9
tiddlywiki/Oracle scripts.tid
Executable file
9
tiddlywiki/Oracle scripts.tid
Executable file
@@ -0,0 +1,9 @@
|
||||
color: #0000a0
|
||||
created: 20190622074003397
|
||||
creator: vplesnila
|
||||
modified: 20190715082632837
|
||||
modifier: vplesnila
|
||||
tags: Oracle
|
||||
title: Oracle scripts
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
15
tiddlywiki/Oracle toolbox.tid
Executable file
15
tiddlywiki/Oracle toolbox.tid
Executable file
@@ -0,0 +1,15 @@
|
||||
created: 20200823075307403
|
||||
creator: vplesnila
|
||||
modified: 20200831161547163
|
||||
modifier: vplesnila
|
||||
tags: Oracle
|
||||
title: Oracle toolbox
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
|
||||
|!usage |!description |!notes|
|
||||
|''@ash/ashtop'' [grouping_cols] [filters] [fromtime] [totime]|Display top ASH time (count of ASH samples) grouped by your specified dimensions|[[exemples|ashtop]]|
|
||||
|''@ash/ash_wait_chains'' [grouping_cols] [filters] [fromtime] [totime]|Display ASH (based on DBA_HIST) wait chains (multi-session wait signature, a session waiting for another session etc.)|[[exemples|ash_wait_chains]]|
|
||||
|''@x'' |Display SQL execution plan for the last SQL statement||
|
||||
|
||||
|
||||
38
tiddlywiki/Orcle Resource Manager.txt
Executable file
38
tiddlywiki/Orcle Resource Manager.txt
Executable file
@@ -0,0 +1,38 @@
|
||||
select name from v$rsrc_plan where is_top_plan='TRUE' and cpu_managed='ON';
|
||||
|
||||
col plan for a30
|
||||
col group_or_subplan for a30
|
||||
|
||||
select plan, group_or_subplan, type, cpu_p1, cpu_p2, cpu_p3, cpu_p4, status
|
||||
from dba_rsrc_plan_directives order by 1,2,3,4,5,6 desc;
|
||||
|
||||
|
||||
SELECT group_or_subplan,
|
||||
cpu_p1,
|
||||
mgmt_p1,
|
||||
mgmt_p2,
|
||||
mgmt_p3,
|
||||
mgmt_p4,
|
||||
mgmt_p5,
|
||||
mgmt_p6,
|
||||
mgmt_p7,
|
||||
mgmt_p8,
|
||||
max_utilization_limit
|
||||
FROM dba_rsrc_plan_directives
|
||||
WHERE plan = (SELECT name
|
||||
FROM v$rsrc_plan
|
||||
WHERE is_top_plan = 'TRUE');
|
||||
|
||||
SELECT TO_CHAR (m.begin_time, 'YYYY-MM-DD HH24:MI:SS') time,
|
||||
m.consumer_group_name,
|
||||
m.cpu_consumed_time / 60000 avg_running_sessions,
|
||||
m.cpu_wait_time / 60000 avg_waiting_sessions,
|
||||
d.mgmt_p1
|
||||
* (SELECT VALUE
|
||||
FROM v$parameter
|
||||
WHERE name = 'cpu_count')
|
||||
/ 100
|
||||
allocation
|
||||
FROM v$rsrcmgrmetric_history m, dba_rsrc_plan_directives d, v$rsrc_plan p
|
||||
WHERE m.consumer_group_name = d.group_or_subplan AND p.name = d.plan
|
||||
ORDER BY m.begin_time, m.consumer_group_name;
|
||||
82
tiddlywiki/PDB - divers.txt
Executable file
82
tiddlywiki/PDB - divers.txt
Executable file
@@ -0,0 +1,82 @@
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~ configurable spfile parameters
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
COLUMN name FORMAT A35
|
||||
COLUMN value FORMAT A35
|
||||
|
||||
SELECT name, value
|
||||
FROM v$system_parameter
|
||||
WHERE ispdb_modifiable = 'TRUE'
|
||||
ORDER BY name;
|
||||
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
~~ Rename of a PDB
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
~~ rename PDB database from JABBAPRD to ZAX
|
||||
alter pluggable database JABBAPRD close immediate;
|
||||
alter pluggable database JABBAPRD open restricted;
|
||||
alter session set container=JABBAPRD;
|
||||
alter pluggable database rename global_name to ZAX;
|
||||
alter pluggable database ZAX close immediate;
|
||||
alter pluggable database ZAX open;
|
||||
alter pluggable database ZAX save state;
|
||||
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~ Switch a CDB in LOCAL_UNDO mode
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
~~ check if LOCAL_UNDO is enable
|
||||
COLUMN property_name FORMAT A30
|
||||
COLUMN property_value FORMAT A30
|
||||
|
||||
SELECT property_name, property_value
|
||||
FROM database_properties
|
||||
WHERE property_name = 'LOCAL_UNDO_ENABLED';
|
||||
|
||||
|
||||
|
||||
~~ disable cluster_database and stop the database
|
||||
alter system set cluster_database=false scope=spfile sid='*';
|
||||
srvctl stop database -db HUTTPRD
|
||||
|
||||
~~ start just one instance in upgrade mode
|
||||
startup upgrade;
|
||||
~~ enable LOCAL_UNDO
|
||||
alter database local undo ON;
|
||||
|
||||
~~ enable cluster_database and start the database
|
||||
alter system set cluster_database=true scope=spfile sid='*';
|
||||
|
||||
~~ stop the instance and start database
|
||||
srvctl start database -db HUTTPRD
|
||||
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
~~ Refreshable PDB
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
~~ context
|
||||
~~ source ZAX@HUTTPRD(on vortex-scan)
|
||||
~~ target HARRA@BOTHAN(on kessel-scan)
|
||||
|
||||
~~ on source HUTTPRD
|
||||
CREATE USER c##kaminoan IDENTIFIED BY secret CONTAINER=ALL;
|
||||
GRANT CREATE SESSION, CREATE PLUGGABLE DATABASE TO c##kaminoan CONTAINER=ALL;
|
||||
|
||||
~~ on target BOTHAN
|
||||
CREATE DATABASE LINK kaminoan_link
|
||||
CONNECT TO c##kaminoan IDENTIFIED BY secret USING 'vortex-scan/HUTTPRD';
|
||||
|
||||
select * from dual@kaminoan_link;
|
||||
|
||||
create pluggable database HARRA from ZAX@kaminoan_link parallel 2 refresh mode manual;
|
||||
alter pluggable database HARRA open read only instances=ALL;
|
||||
|
||||
SELECT status, refresh_mode FROM dba_pdbs WHERE pdb_name = 'HARRA';
|
||||
|
||||
~~ to perform a refresh
|
||||
alter pluggable database HARRA close immediate instances=ALL;
|
||||
alter pluggable database HARRA refresh;
|
||||
alter pluggable database HARRA open read only instances=ALL;
|
||||
|
||||
|
||||
|
||||
101
tiddlywiki/PDB clone examples.md
Executable file
101
tiddlywiki/PDB clone examples.md
Executable file
@@ -0,0 +1,101 @@
|
||||
Clone PDB from a remote CDB using RMAN "from active database"
|
||||
-------------------------------------------------------------
|
||||
|
||||
On target CDB, set the source CDB archivelog location:
|
||||
|
||||
alter system set REMOTE_RECOVERY_FILE_DEST='/fra' scope=MEMORY sid='*';
|
||||
|
||||
Run RMAN duplicate command:
|
||||
|
||||
rman target='sys/"*****"@taris/ASTYPRD' auxiliary='sys/"*****"@mandalore/ELLOPRD'
|
||||
|
||||
run
|
||||
{
|
||||
allocate auxiliary channel aux01 device type disk;
|
||||
allocate auxiliary channel aux02 device type disk;
|
||||
allocate auxiliary channel aux03 device type disk;
|
||||
allocate auxiliary channel aux04 device type disk;
|
||||
allocate auxiliary channel aux05 device type disk;
|
||||
allocate auxiliary channel aux06 device type disk;
|
||||
allocate auxiliary channel aux07 device type disk;
|
||||
allocate auxiliary channel aux08 device type disk;
|
||||
allocate auxiliary channel aux09 device type disk;
|
||||
allocate auxiliary channel aux10 device type disk;
|
||||
duplicate pluggable database WEDGEPRD as ANTILLESPRD
|
||||
from active database using compressed backupset section size 400M;
|
||||
}
|
||||
|
||||
|
||||
Clone PDB from a remote CDB through a database link
|
||||
---------------------------------------------------
|
||||
|
||||
On source CDB create a user to be used by the database link:
|
||||
|
||||
create user c##adminpdb identified by adminpdb container=ALL;
|
||||
grant create session, create pluggable database to c##adminpdb container=all;
|
||||
|
||||
|
||||
On target CDB create the database link and clone the remote PDB.
|
||||
|
||||
create database link ASTYPRD connect to c##adminpdb identified by "adminpdb" using 'taris/ASTYPRD';
|
||||
select * from dual@ASTYPRD;
|
||||
|
||||
create pluggable database ANTILLESPRD from WEDGEPRD@ASTYPRD parallel 10;
|
||||
alter pluggable database ANTILLESPRD open;
|
||||
|
||||
|
||||
> Note that in both methods we can choose the parallelism degree.
|
||||
|
||||
|
||||
Clone PDB from a remote CDB using a RMAN backup
|
||||
-----------------------------------------------
|
||||
|
||||
Because in Oracle 21c it is still not possible to duplicate a pluggable database directly from a backup (aka *duplicate backup location*), we will perform this operation in 2 steps:
|
||||
1. duplicate from location the *root* PDB + source PDB into an auxiliary CDB
|
||||
2. unplug the PDB from the auxiliary CDB and plug it into the target CDB
|
||||
|
||||
|
||||
> A *set until time* clause can be specified in duplicate command.
|
||||
|
||||
Start the AUXCDB CDB instance using a basic spfile, then run the duplicate command:
|
||||
|
||||
rman auxiliary /
|
||||
|
||||
run
|
||||
{
|
||||
allocate auxiliary channel aux01 device type disk;
|
||||
allocate auxiliary channel aux02 device type disk;
|
||||
allocate auxiliary channel aux03 device type disk;
|
||||
allocate auxiliary channel aux04 device type disk;
|
||||
allocate auxiliary channel aux05 device type disk;
|
||||
allocate auxiliary channel aux06 device type disk;
|
||||
allocate auxiliary channel aux07 device type disk;
|
||||
allocate auxiliary channel aux08 device type disk;
|
||||
allocate auxiliary channel aux09 device type disk;
|
||||
allocate auxiliary channel aux10 device type disk;
|
||||
set until time "TIMESTAMP'2021-11-08 15:40:00'";
|
||||
duplicate database to AUXCDB
|
||||
pluggable database WEDGEPRD,root
|
||||
backup location '/mnt/yavin4/tmp/_oracle_/orabackup/ASTY';
|
||||
}
|
||||
|
||||
|
||||
Unplug PDB from auxiliary CDB:
|
||||
|
||||
alter pluggable database WEDGEPRD close immediate;
|
||||
alter pluggable database WEDGEPRD open read only;
|
||||
|
||||
|
||||
alter session set container=WEDGEPRD;
|
||||
exec DBMS_PDB.DESCRIBE('/mnt/yavin4/tmp/_oracle_/tmp/WEDGE.xml');
|
||||
alter pluggable database WEDGEPRD close immediate;
|
||||
|
||||
|
||||
Plug in PDB on target CDB (with copy, move or nocopy option):
|
||||
|
||||
create pluggable database ANTILLESPRD using '/mnt/yavin4/tmp/_oracle_/tmp/WEDGE.xml' move;
|
||||
alter pluggable database ANTILLESPRD open;
|
||||
alter pluggable database ANTILLESPRD save state;
|
||||
|
||||
|
||||
At this moment we can destroy the auxiliary CDB.
|
||||
21
tiddlywiki/PL_SQL insert lines for testing PITR.txt
Executable file
21
tiddlywiki/PL_SQL insert lines for testing PITR.txt
Executable file
@@ -0,0 +1,21 @@
|
||||
alter session set NLS_DATE_FORMAT='YYYY-MM-DD HH24:MI:SS';
|
||||
|
||||
drop table u0.t0 purge;
|
||||
create table u0.t0(d date);
|
||||
|
||||
declare
|
||||
|
||||
i integer;
|
||||
maxi integer default 10000000;
|
||||
|
||||
|
||||
begin
|
||||
for i in 1..maxi loop
|
||||
begin
|
||||
insert into u0.t0 values (sysdate);
|
||||
commit;
|
||||
sys.dbms_session.sleep(1);
|
||||
end;
|
||||
end loop;
|
||||
end;
|
||||
/
|
||||
68
tiddlywiki/Pending stats - scratchpad - 01.txt
Executable file
68
tiddlywiki/Pending stats - scratchpad - 01.txt
Executable file
@@ -0,0 +1,68 @@
|
||||
Optimizer Statistics Gathering – pending and history
|
||||
https://www.dbi-services.com/blog/optimizer-statistics-gathering-pending-and-history/
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
create user XIZOR identified by secret;
|
||||
grant connect, resource to XIZOR;
|
||||
grant unlimited tablespace to XIZOR;
|
||||
grant select any dictionary to XIZOR;
|
||||
|
||||
connect XIZOR/secret
|
||||
|
||||
create table DEMO as select rownum n from dual;
|
||||
|
||||
col analyzed for a30
|
||||
col published_prefs for a30
|
||||
|
||||
select num_rows,cast(last_analyzed as timestamp) analyzed,dbms_stats.get_prefs('PUBLISH',owner,table_name) published_prefs from dba_tab_statistics where owner='XIZOR' and table_name in ('DEMO');
|
||||
|
||||
|
||||
insert into DEMO select rownum n from xmltable('1 to 41');
|
||||
|
||||
|
||||
set pages 999 lines 200
|
||||
|
||||
select /*+ gather_plan_statistics */ count(*) from DEMO;
|
||||
|
||||
select * from table(dbms_xplan.display_cursor(format=>'basic +rows +rowstats last'));
|
||||
|
||||
exec dbms_stats.set_table_prefs('XIZOR','DEMO','PUBLISH','FALSE');
|
||||
|
||||
|
||||
exec dbms_stats.gather_table_stats('XIZOR','DEMO');
|
||||
|
||||
|
||||
select num_rows,cast(last_analyzed as timestamp) analyzed,dbms_stats.get_prefs('PUBLISH',owner,table_name) published_prefs from dba_tab_pending_stats where owner='XIZOR' and table_name in ('DEMO');
|
||||
|
||||
exec dbms_stats.delete_pending_stats('XIZOR','DEMO');
|
||||
|
||||
exec dbms_stats.publish_pending_stats('XIZOR','DEMO',no_invalidate=>false);
|
||||
|
||||
exec dbms_stats.set_table_prefs('XIZOR','DEMO','PUBLISH','TRUE');
|
||||
|
||||
exec dbms_stats.restore_table_stats('XIZOR','DEMO',sysdate-1,no_invalidate=>false);
|
||||
|
||||
|
||||
|
||||
select report from table(dbms_stats.diff_table_stats_in_history('XIZOR','DEMO',sysdate-1,sysdate,0));
|
||||
|
||||
select
|
||||
end_time,end_time-start_time,operation,target,
|
||||
regexp_replace(regexp_replace(notes,'" val="','=>'),'(||)',' '),
|
||||
status
|
||||
from
|
||||
DBA_OPTSTAT_OPERATIONS where regexp_like(target,'"?'||'XIZOR'||'"?."?'||'DEMO'||'"?') order by end_time desc fetch first 10 rows only
|
||||
/
|
||||
|
||||
|
||||
select table_name,stats_update_time from dba_tab_stats_history where owner='XIZOR' and table_name='DEMO';
|
||||
|
||||
set long 2000000
|
||||
set pagesize 1000
|
||||
|
||||
select * from table(dbms_stats.diff_table_stats_in_history(
|
||||
ownname => 'XIZOR',
|
||||
tabname => 'DEMO',
|
||||
time1 => systimestamp-1,
|
||||
time2 => systimestamp,
|
||||
pctthreshold => 0));
|
||||
44
tiddlywiki/PostgreSQL - pgSentinel.tid
Executable file
44
tiddlywiki/PostgreSQL - pgSentinel.tid
Executable file
@@ -0,0 +1,44 @@
|
||||
created: 20190616221128760
|
||||
creator: vplesnila
|
||||
modified: 20190616221559458
|
||||
modifier: vplesnila
|
||||
tags: PostgreSQL
|
||||
title: PostgreSQL - pgSentinel
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
! Parameters for pg_stat_statements
|
||||
```
|
||||
shared_preload_libraries = 'pg_stat_statements'
|
||||
|
||||
pg_stat_statements.max = 10000
|
||||
pg_stat_statements.track = all
|
||||
```
|
||||
|
||||
! Parameters for pg_sentinel
|
||||
```
|
||||
shared_preload_libraries = 'pg_stat_statements,pgsentinel'
|
||||
# Increase the max size of the query strings Postgres records
|
||||
track_activity_query_size = 2048
|
||||
# Track statements generated by stored procedures as well
|
||||
pg_stat_statements.track = all
|
||||
```
|
||||
|
||||
! Create the extensions at the DATABASE level
|
||||
|
||||
```
|
||||
create extension pg_stat_statements;
|
||||
create extension pgsentinel;
|
||||
```
|
||||
|
||||
! Performance views
|
||||
* `pg_stat_activity`
|
||||
* `pg_stat_statements`
|
||||
* `pg_active_session_history` (history of `pg_stat_activity`)
|
||||
|
||||
! Examples
|
||||
|
||||
```
|
||||
select ash_time,top_level_query,query,queryid,wait_event_type,wait_event from pg_active_session_history where query != 'ROLLBACK' order by ash_time desc limit 15;
|
||||
|
||||
select ash_time, wait_event, wait_event_type from pg_active_session_history where queryid=3548524963606505593 order by ash_time desc limit 15;
|
||||
```
|
||||
9
tiddlywiki/PostgreSQL.tid
Executable file
9
tiddlywiki/PostgreSQL.tid
Executable file
@@ -0,0 +1,9 @@
|
||||
color: #000040
|
||||
created: 20190622074252852
|
||||
creator: vplesnila
|
||||
modified: 20190622233240929
|
||||
modifier: vplesnila
|
||||
tags: Contents
|
||||
title: PostgreSQL
|
||||
type: text/vnd.tiddlywiki
|
||||
|
||||
22
tiddlywiki/PowerTools Repository on Rocky Linux 8.md
Executable file
22
tiddlywiki/PowerTools Repository on Rocky Linux 8.md
Executable file
@@ -0,0 +1,22 @@
|
||||
> [Original article](https://www.how2shout.com/linux/how-to-enable-powertools-repository-on-rocky-linux-8/)
|
||||
|
||||
Install DNF plugins package:
|
||||
|
||||
dnf install dnf-plugins-core
|
||||
|
||||
|
||||
Install EPEL:
|
||||
|
||||
dnf install epel-release
|
||||
|
||||
Enable PowerTools repository on Rocky Linux 8:
|
||||
|
||||
dnf config-manager --set-enabled powertools
|
||||
|
||||
Update command:
|
||||
|
||||
dnf update
|
||||
|
||||
Check the Added repository on Rocky Linux:
|
||||
|
||||
dnf repolist
|
||||
5
tiddlywiki/Proxy user.txt
Executable file
5
tiddlywiki/Proxy user.txt
Executable file
@@ -0,0 +1,5 @@
|
||||
create user DEPLOY identified by Alabalaportocala1#;
|
||||
grant create session to DEPLOY;
|
||||
alter user DRIVE grant connect through DEPLOY;
|
||||
connect DEPLOY[DRIVE]/Alabalaportocala1#@dmp01-scan/DRF1PRDEXA
|
||||
show user;
|
||||
1
tiddlywiki/Put archivelogs on Recovery Area.txt
Executable file
1
tiddlywiki/Put archivelogs on Recovery Area.txt
Executable file
@@ -0,0 +1 @@
|
||||
alter system set log_archive_dest_1='LOCATION=USE_DB_RECOVERY_FILE_DEST' scope=both sid='*';
|
||||
9
tiddlywiki/Python - pip examples.md
Executable file
9
tiddlywiki/Python - pip examples.md
Executable file
@@ -0,0 +1,9 @@
|
||||
Download module dependencies:
|
||||
|
||||
pip download Flask -d .
|
||||
pip download fabric2 -d .
|
||||
|
||||
Offline install a module using pip:
|
||||
|
||||
pip install --no-index --find-links ./ fabric2
|
||||
pip install --no-index --find-links ./ Flask
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user