2026-03-12 21:01:38

This commit is contained in:
2026-03-12 22:01:38 +01:00
parent 3bd1db26cc
commit 26296b6d6a
336 changed files with 27507 additions and 0 deletions

BIN
Golden_Gate/.DS_Store vendored Normal file

Binary file not shown.

View File

@@ -0,0 +1,47 @@
Clean up old Extracts
---------------------
https://www.dbasolved.com/2022/04/clean-up-old-extracts/
0. Identify captures and log miner sessions
-------------------------------------------
set linesize 150
col capture_name format a20
select capture_name from dba_capture;
set linesize 130
col session_name format a20
col global_db_name format a45
select SESSION#,CLIENT#,SESSION_NAME,DB_ID,GLOBAL_DB_NAME from system.LOGMNR_SESSION$;
1. Drop the extracts
---------------------
exec DBMS_CAPTURE_ADM.DROP_CAPTURE ('<MY_CAPTURE_01>');
2. Drop queue tables from log miner
-----------------------------------
set linesize 250
col owner format a30
col name format a30
col queue_table format a30
select owner, name, queue_table from dba_queues where owner = 'OGGADMIN';
# delete in automatic mode
declare
  -- Fully qualified name: owner + '.' + queue_table. Each identifier can be
  -- up to 30 chars (128 in 12.2+), so the original VARCHAR2(60) could
  -- overflow with ORA-06502 once owner + table name exceeded 59 characters.
  v_queue_name varchar2(261);
begin
  -- Drop every AQ queue table owned by the GoldenGate admin schema.
  -- DISTINCT: dba_queues has one row per queue, and several queues can share
  -- one queue table; without it the second DROP on the same table would fail.
  for i in (select distinct queue_table, owner
              from dba_queues
             where owner = 'OGGADMIN')
  loop
    v_queue_name := i.owner||'.'||i.queue_table;
    -- force => TRUE drops the queue table even if queues are still defined
    -- on it (per DBMS_AQADM.DROP_QUEUE_TABLE documentation).
    DBMS_AQADM.DROP_QUEUE_TABLE(queue_table => v_queue_name, force => TRUE);
  end loop;
end;
/
# or delete one by one
exec DBMS_AQADM.DROP_QUEUE_TABLE(queue_table => '<OWNER>.<TABLE_NAME>', force => TRUE);
# note that tables with the AQ$_ prefix will be automatically deleted

View File

@@ -0,0 +1,234 @@
### Sources
- [OGG Documentation](https://docs.oracle.com/en/middleware/goldengate/core/19.1/securing/securing-deployments.html#GUID-472E5C9C-85FC-4B87-BB90-2CE877F41DC0)
- [Markdown Basic Syntax](https://www.markdownguide.org/basic-syntax/)
### Creating a Self-Signed Root Certificate
Create an automatic login wallet
orapki wallet create \
-wallet /app/oracle/staging_area/wallet_dir/rootCA \
-pwd "LuxAeterna12;" \
-auto_login
Create self-signed certificate
orapki wallet add \
-wallet /app/oracle/staging_area/wallet_dir/rootCA \
-pwd "LuxAeterna12;" \
-dn "CN=RootCA" \
-keysize 2048 \
-self_signed \
-validity 7300 \
-sign_alg sha256
Check the contents of the wallet
orapki wallet display \
-wallet /app/oracle/staging_area/wallet_dir/rootCA \
-pwd "LuxAeterna12;"
Export the certificate to a .pem file
orapki wallet export \
-wallet /app/oracle/staging_area/wallet_dir/rootCA \
-pwd "LuxAeterna12;" \
-dn "CN=RootCA" \
-cert /app/oracle/staging_area/export/rootCA_Cert.pem
### Creating Server Certificates
#### For [exegol] server
Create an automatic login wallet
orapki wallet create \
-wallet /app/oracle/staging_area/wallet_dir/exegol \
-pwd "TabulaRasa32;" \
-auto_login
Add a Certificate Signing Request (CSR) to the servers wallet
orapki wallet add \
-wallet /app/oracle/staging_area/wallet_dir/exegol \
-pwd "TabulaRasa32;" \
-dn "CN=exegol.swgalaxy" \
-keysize 2048
Export the CSR to a .pem file
orapki wallet export \
-wallet /app/oracle/staging_area/wallet_dir/exegol \
-pwd "TabulaRasa32;" \
-dn "CN=exegol.swgalaxy" \
-request /app/oracle/staging_area/export/exegol_req.pem
Using the CSR, create a signed server or client certificate and sign it using the root certificate.
Assign a unique serial number to each certificate.
orapki cert create \
-wallet /app/oracle/staging_area/wallet_dir/rootCA \
-pwd "LuxAeterna12;" \
-request /app/oracle/staging_area/export/exegol_req.pem \
-cert /app/oracle/staging_area/export/exegol_Cert.pem \
-serial_num 20 \
-validity 375 \
-sign_alg sha256
Add the root certificate into the clients or servers wallet as a trusted certificate.
orapki wallet add \
-wallet /app/oracle/staging_area/wallet_dir/exegol \
-pwd "TabulaRasa32;" \
-trusted_cert \
-cert /app/oracle/staging_area/export/rootCA_Cert.pem
Add the server or client certificate as a user certificate into the clients or servers wallet
orapki wallet add \
-wallet /app/oracle/staging_area/wallet_dir/exegol \
-pwd "TabulaRasa32;" \
-user_cert \
-cert /app/oracle/staging_area/export/exegol_Cert.pem
Check the contents of the wallet
orapki wallet display \
-wallet /app/oracle/staging_area/wallet_dir/exegol \
-pwd "TabulaRasa32;"
#### For [helska] server
Create an automatic login wallet
orapki wallet create \
-wallet /app/oracle/staging_area/wallet_dir/helska \
-pwd "SicSemper81;" \
-auto_login
Add a Certificate Signing Request (CSR) to the servers wallet
orapki wallet add \
-wallet /app/oracle/staging_area/wallet_dir/helska \
-pwd "SicSemper81;" \
-dn "CN=helska.swgalaxy" \
-keysize 2048
Export the CSR to a .pem file
orapki wallet export \
-wallet /app/oracle/staging_area/wallet_dir/helska \
-pwd "SicSemper81;" \
-dn "CN=helska.swgalaxy" \
-request /app/oracle/staging_area/export/helska_req.pem
Using the CSR, create a signed server or client certificate and sign it using the root certificate.
Assign a unique serial number to each certificate.
orapki cert create \
-wallet /app/oracle/staging_area/wallet_dir/rootCA \
-pwd "LuxAeterna12;" \
-request /app/oracle/staging_area/export/helska_req.pem \
-cert /app/oracle/staging_area/export/helska_Cert.pem \
-serial_num 21 \
-validity 375 \
-sign_alg sha256
Add the root certificate into the clients or servers wallet as a trusted certificate.
orapki wallet add \
-wallet /app/oracle/staging_area/wallet_dir/helska \
-pwd "SicSemper81;" \
-trusted_cert \
-cert /app/oracle/staging_area/export/rootCA_Cert.pem
Add the server or client certificate as a user certificate into the clients or servers wallet
orapki wallet add \
-wallet /app/oracle/staging_area/wallet_dir/helska \
-pwd "SicSemper81;" \
-user_cert \
-cert /app/oracle/staging_area/export/helska_Cert.pem
Check the contents of the wallet
orapki wallet display \
-wallet /app/oracle/staging_area/wallet_dir/helska \
-pwd "SicSemper81;"
### Creating a Distribution Server User Certificate
Create an automatic login wallet
orapki wallet create \
-wallet /app/oracle/staging_area/wallet_dir/dist_client \
-pwd "LapsusLinguae91" \
-auto_login
Add a Certificate Signing Request (CSR) to the wallet
orapki wallet add \
-wallet /app/oracle/staging_area/wallet_dir/dist_client \
-pwd "LapsusLinguae91" \
-dn "CN=dist_client" \
-keysize 2048
Export the CSR to a .pem file
orapki wallet export \
-wallet /app/oracle/staging_area/wallet_dir/dist_client \
-pwd "LapsusLinguae91" \
-dn "CN=dist_client" \
-request /app/oracle/staging_area/export/dist_client_req.pem
Using the CSR, create a signed certificate and sign it using the root certificate.
Assign a unique serial number to each certificate.
orapki cert create \
-wallet /app/oracle/staging_area/wallet_dir/rootCA \
-pwd "LuxAeterna12;" \
-request /app/oracle/staging_area/export/dist_client_req.pem \
-cert /app/oracle/staging_area/export/dist_client_Cert.pem \
-serial_num 22 \
-validity 375 \
-sign_alg sha256
Add the root certificate into the clients or servers wallet as a trusted certificate.
orapki wallet add \
-wallet /app/oracle/staging_area/wallet_dir/dist_client \
-pwd "LapsusLinguae91" \
-trusted_cert \
-cert /app/oracle/staging_area/export/rootCA_Cert.pem
Add the server or client certificate as a user certificate into the clients or servers wallet
orapki wallet add \
-wallet /app/oracle/staging_area/wallet_dir/dist_client \
-pwd "LapsusLinguae91" \
-user_cert \
-cert /app/oracle/staging_area/export/dist_client_Cert.pem
Check the contents of the wallet
orapki wallet display \
-wallet /app/oracle/staging_area/wallet_dir/dist_client \
-pwd "LapsusLinguae91"
### Trusted Certificates
Both the Distribution Server and Receiver Server need certificates.
- The Distribution Server uses the certificate in the client wallet location under outbound section
- For the Receiver Server, the certificate is in the wallet for the inbound wallet location
For self-signed certificates, you can choose from one of the following:
- Have both certificates signed by the same Root Certificate
- The other sides certificate is added to the local wallet as trusted certificate

View File

@@ -0,0 +1,296 @@
## Context
- setup extract/replicat for 3 tables: ORDERS, PRODUCTS and USERS
- add 2 new tables TRANSACTIONS and TASKS to this extract/replica peer
The aim is to minimize the downtime for the peer extract/replicat, so we will proceed in 2 steps:
- create a second parallel extract/replicat for the 2 new tables
- merge the second extract/replicat to initial extract/replicat
## Extract setup
Add trandata to tables:
dblogin useridalias YODA
add trandata GREEN.ORDERS
add trandata GREEN.PRODUCTS
add trandata GREEN.USERS
list tables GREEN.*
Define params file for extract:
edit params EXTRAA
extract EXTRAA
useridalias JEDIPRD
sourcecatalog YODA
exttrail ./dirdat/aa
purgeoldextracts
checkpointsecs 1
ddl include mapped
warnlongtrans 1h, checkinterval 30m
------------------------------------
table GREEN.ORDERS;
table GREEN.PRODUCTS;
table GREEN.USERS;
Add, register and start extract:
dblogin useridalias JEDIPRD
add extract EXTRAA, integrated tranlog, begin now
add exttrail ./dirdat/aa, extract EXTRAA
register extract EXTRAA, database container (YODA)
start extract EXTRAA
info extract EXTRAA detail
## Initial load
Note down the current SCN on source database.
SQL> select current_scn from v$database;
CURRENT_SCN
-----------
10138382
On target DB create the table structures for ORDERS, PRODUCTS, USERS and do the initial load:
SCN=10138382
impdp userid=admin/"Secret00!"@togoria/MAUL network_link=GREEN_AT_YODA logfile=MY:import_01.log remap_schema=GREEN:RED tables=GREEN.ORDERS,GREEN.PRODUCTS,GREEN.USERS TABLE_EXISTS_ACTION=TRUNCATE flashback_scn=$SCN
## Replicat setup
Define params file for replicat.
Take care to filter `filter(@GETENV ('TRANSACTION','CSN')`, it must be positionned to the SCN of initial load.
edit params REPLAA
replicat REPLAA
useridalias MAUL
dboptions enable_instantiation_filtering
discardfile REPLAA.dsc, purge, megabytes 10
map YODA.GREEN.ORDERS, target MAUL.RED.ORDERS, filter(@GETENV ('TRANSACTION','CSN') > 10138382);
map YODA.GREEN.PRODUCTS, target MAUL.RED.PRODUCTS, filter(@GETENV ('TRANSACTION','CSN') > 10138382);
map YODA.GREEN.USERS, target MAUL.RED.USERS, filter(@GETENV ('TRANSACTION','CSN') > 10138382);
Add and start replicat:
add replicat REPLAA, integrated, exttrail ./dirdat/aa
dblogin useridalias SITHPRD
register replicat REPLAA database
start replicat REPLAA
info all
Wait to catch the lag:
lag replicat
When done you can remove filter `filter(@GETENV ('TRANSACTION','CSN')`
edit params REPLAA
replicat REPLAA
useridalias MAUL
dboptions enable_instantiation_filtering
discardfile REPLAA.dsc, purge, megabytes 10
map YODA.GREEN.ORDERS , target MAUL.RED.ORDERS ;
map YODA.GREEN.PRODUCTS , target MAUL.RED.PRODUCTS ;
map YODA.GREEN.USERS , target MAUL.RED.USERS ;
restart replicat REPLAA
## Add 2 new tables to extract/replicat
Add trandata to tables:
dblogin useridalias YODA
add trandata GREEN.TRANSACTIONS
add trandata GREEN.TASKS
list tables GREEN.*
Create a second extract EXTRAB to manage the new tables.
Define extract parameters:
edit params EXTRAB
extract EXTRAB
useridalias JEDIPRD
sourcecatalog YODA
exttrail ./dirdat/ab
purgeoldextracts
checkpointsecs 1
ddl include mapped
warnlongtrans 1h, checkinterval 30m
table GREEN.TRANSACTIONS;
table GREEN.TASKS;
Add, register and start extract:
dblogin useridalias JEDIPRD
add extract EXTRAB, integrated tranlog, begin now
add exttrail ./dirdat/ab, extract EXTRAB
register extract EXTRAB, database container (YODA)
start extract EXTRAB
info extract EXTRAB detail
## Initial load for new tables
Note down the current SCN on source database.
SQL> select current_scn from v$database;
CURRENT_SCN
-----------
10284191
On target DB create the table structures for TRANSACTIONS, TASKS and do the initial load:
SCN=10284191
impdp userid=admin/"Secret00!"@togoria/MAUL network_link=GREEN_AT_YODA logfile=MY:import_02.log remap_schema=GREEN:RED tables=GREEN.TRANSACTIONS,GREEN.TASKS TABLE_EXISTS_ACTION=TRUNCATE flashback_scn=$SCN
## New replicat setup
Define extract parameters.
Pay attention to the `filter(@GETENV ('TRANSACTION','CSN')` clause: it must be set to the SCN of the initial Data Pump load.
edit params REPLAB
replicat REPLAB
useridalias MAUL
dboptions enable_instantiation_filtering
discardfile REPLAB.dsc, purge, megabytes 10
map YODA.GREEN.TRANSACTIONS, target MAUL.RED.TRANSACTIONS, filter(@GETENV ('TRANSACTION','CSN') > 10284191);
map YODA.GREEN.TASKS, target MAUL.RED.TASKS, filter(@GETENV ('TRANSACTION','CSN') > 10284191);
Add and start new replicat:
add replicat REPLAB, integrated, exttrail ./dirdat/ab
dblogin useridalias SITHPRD
register replicat REPLAB database
start replicat REPLAB
info all
Check if new replicat is running and wait to lag 0.
## Integrate the 2 new tables to initial extract/replicat: EXTRAA/REPLAA
Add new tables to initial extract for a **double run**:
edit params EXTRAA
extract EXTRAA
useridalias JEDIPRD
sourcecatalog YODA
exttrail ./dirdat/aa
purgeoldextracts
checkpointsecs 1
ddl include mapped
warnlongtrans 1h, checkinterval 30m
table GREEN.ORDERS;
table GREEN.PRODUCTS;
table GREEN.USERS;
table GREEN.TRANSACTIONS;
table GREEN.TASKS;
Restart extract EXTRAA:
restart extract EXTRAA
Stop the extracts in this **strict order**:
- **first** extract: EXTRAA
- **second** extract: EXTRAB
> It is **mandatory** to stop extracts in this order.
> **The applied SCN on first replicat tables must be less than the SCN on second replicat** in order to allow the first replicat to start at the last applied position in the trail file. This way, the first replicat does not need to be repositioned in the past.
stop EXTRACT EXTRAA
stop EXTRACT EXTRAB
Now stop both replicat also:
stop replicat REPLAA
stop replicat REPLAB
Note down the SCN for each extract and prepare the new params file for the initial replicat.
info extract EXTRAA detail
info extract EXTRAB detail
In my case:
- EXTRAA: SCN=10358472
- EXTRAB: SCN=10358544
> The SCN of EXTRAB should be greater than the SCN of EXTRAA
Update REPLAA replicat parameter file in accordance with the latest SCN applied on new tables (the SCN of EXTRAB):
edit params REPLAA
replicat REPLAA
useridalias MAUL
dboptions enable_instantiation_filtering
discardfile REPLAA.dsc, purge, megabytes 10
map YODA.GREEN.ORDERS , target MAUL.RED.ORDERS ;
map YODA.GREEN.PRODUCTS , target MAUL.RED.PRODUCTS ;
map YODA.GREEN.USERS , target MAUL.RED.USERS ;
map YODA.GREEN.TRANSACTIONS , target MAUL.RED.TRANSACTIONS, filter(@GETENV ('TRANSACTION','CSN') > 10358544);
map YODA.GREEN.TASKS , target MAUL.RED.TASKS, filter(@GETENV ('TRANSACTION','CSN') > 10358544);
Start first extract/replicat
start extract EXTRAA
start replicat REPLAA
When the lag is zero you can remove filter `filter(@GETENV ('TRANSACTION','CSN')`
edit params REPLAA
replicat REPLAA
useridalias MAUL
dboptions enable_instantiation_filtering
discardfile REPLAA.dsc, purge, megabytes 10
map YODA.GREEN.ORDERS , target MAUL.RED.ORDERS ;
map YODA.GREEN.PRODUCTS , target MAUL.RED.PRODUCTS ;
map YODA.GREEN.USERS , target MAUL.RED.USERS ;
map YODA.GREEN.TRANSACTIONS , target MAUL.RED.TRANSACTIONS ;
map YODA.GREEN.TASKS , target MAUL.RED.TASKS ;
Restart first replicat:
start replicat REPLAA
Now all tables are integrated in first extract/replicat.
## Remove second extract/replicat
dblogin useridalias JEDIPRD
unregister extract EXTRAB database
delete extract EXTRAB
dblogin useridalias MAUL
unregister replicat REPLAB database
delete replicat REPLAB

View File

@@ -0,0 +1,12 @@
-- Compare row counts between target (local RED schema) and source (GREEN
-- schema reached over the GREEN_AT_YODA database link) for every replicated
-- table. Each branch emits exactly one uniquely labeled row, so UNION ALL is
-- used: plain UNION would add a pointless sort/dedup pass over the db link
-- results. count(*) is the idiomatic row-count form.
select 'ORDERS (target)='||count(*) as "#rows" from RED.ORDERS union all
select 'ORDERS (source)='||count(*) as "#rows" from GREEN.ORDERS@GREEN_AT_YODA union all
select 'PRODUCTS (target)='||count(*) as "#rows" from RED.PRODUCTS union all
select 'PRODUCTS (source)='||count(*) as "#rows" from GREEN.PRODUCTS@GREEN_AT_YODA union all
select 'USERS (target)='||count(*) as "#rows" from RED.USERS union all
select 'USERS (source)='||count(*) as "#rows" from GREEN.USERS@GREEN_AT_YODA union all
select 'TRANSACTIONS (target)='||count(*) as "#rows" from RED.TRANSACTIONS union all
select 'TRANSACTIONS (source)='||count(*) as "#rows" from GREEN.TRANSACTIONS@GREEN_AT_YODA union all
select 'TASKS (target)='||count(*) as "#rows" from RED.TASKS union all
select 'TASKS (source)='||count(*) as "#rows" from GREEN.TASKS@GREEN_AT_YODA
order by 1 asc
/

View File

@@ -0,0 +1,83 @@
-- Create sequences for primary key generation (one per table)
CREATE SEQUENCE seq_products START WITH 1 INCREMENT BY 1;
CREATE SEQUENCE seq_orders START WITH 1 INCREMENT BY 1;
CREATE SEQUENCE seq_users START WITH 1 INCREMENT BY 1;
CREATE SEQUENCE seq_transactions START WITH 1 INCREMENT BY 1;
CREATE SEQUENCE seq_tasks START WITH 1 INCREMENT BY 1;
-- Create tables with meaningful names and relevant columns
CREATE TABLE products (
    id NUMBER PRIMARY KEY,
    name VARCHAR2(100),
    category VARCHAR2(20),
    quantity INTEGER
);
CREATE TABLE orders (
    id NUMBER PRIMARY KEY,
    description VARCHAR2(255),
    status VARCHAR2(20)
);
CREATE TABLE users (
    id NUMBER PRIMARY KEY,
    created_at DATE DEFAULT SYSDATE,
    username VARCHAR2(20),
    age INTEGER,
    location VARCHAR2(20)
);
CREATE TABLE transactions (
    id NUMBER PRIMARY KEY,
    -- amount: up to 8 integer digits, 2 decimal places
    amount NUMBER(10,2),
    currency VARCHAR2(20)
);
CREATE TABLE tasks (
    id NUMBER PRIMARY KEY,
    status VARCHAR2(50),
    priority INTEGER,
    type VARCHAR2(20),
    assigned_to VARCHAR2(20)
);
-- Triggers that auto-generate primary key values from the sequences.
-- Direct sequence assignment (:NEW.id := seq.NEXTVAL) is used instead of the
-- legacy SELECT ... INTO ... FROM dual idiom; it is supported since Oracle
-- 11g and both databases in these notes are 19c/21c.
CREATE OR REPLACE TRIGGER trg_products_pk
BEFORE INSERT ON products
FOR EACH ROW
BEGIN
    :NEW.id := seq_products.NEXTVAL;
END;
/
CREATE OR REPLACE TRIGGER trg_orders_pk
BEFORE INSERT ON orders
FOR EACH ROW
BEGIN
    :NEW.id := seq_orders.NEXTVAL;
END;
/
CREATE OR REPLACE TRIGGER trg_users_pk
BEFORE INSERT ON users
FOR EACH ROW
BEGIN
    :NEW.id := seq_users.NEXTVAL;
END;
/
CREATE OR REPLACE TRIGGER trg_transactions_pk
BEFORE INSERT ON transactions
FOR EACH ROW
BEGIN
    :NEW.id := seq_transactions.NEXTVAL;
END;
/
CREATE OR REPLACE TRIGGER trg_tasks_pk
BEFORE INSERT ON tasks
FOR EACH ROW
BEGIN
    :NEW.id := seq_tasks.NEXTVAL;
END;
/

View File

@@ -0,0 +1,16 @@
## Delete an integrated replicat
dblogin useridalias SITHPRD
stop replicat REPLAB
unregister replicat REPLAB database
delete replicat REPLAB
info all
## Delete an integrated extract
dblogin useridalias JEDIPRD
stop extract EXTRAB
unregister extract EXTRAB database
delete extract EXTRAB
info all

View File

@@ -0,0 +1,20 @@
-- Disable (pause) the JOB_MANAGE_DATA scheduler job; its definition is kept
-- and it can be re-enabled later.
BEGIN
DBMS_SCHEDULER.disable('JOB_MANAGE_DATA');
END;
/
-- Re-enable the job; it resumes running per its repeat_interval.
BEGIN
DBMS_SCHEDULER.enable('JOB_MANAGE_DATA');
END;
/
-- Permanently remove the job definition from the scheduler.
BEGIN
DBMS_SCHEDULER.drop_job('JOB_MANAGE_DATA');
END;
/

View File

@@ -0,0 +1,195 @@
## Context
Replicat is ABENDED because of a data issue.
The aim is to re-establish the replicat and minimize the downtime.
## Provoke a failure on replicat
On target database truncate RED.TRANSACTIONS table:
truncate table RED.TRANSACTIONS;
Replicat will abend because of update/delete operations:
status replicat REPLAA
REPLICAT REPLAA: ABENDED
## Remove table from replicat
Comment MAP line relative to TRANSACTIONS table on replicat and restart the replicat.
edit params REPLAA
replicat REPLAA
useridalias MAUL
dboptions enable_instantiation_filtering
discardfile REPLAA.dsc, purge, megabytes 10
map YODA.GREEN.ORDERS , target MAUL.RED.ORDERS ;
map YODA.GREEN.PRODUCTS , target MAUL.RED.PRODUCTS ;
map YODA.GREEN.USERS , target MAUL.RED.USERS ;
-- map YODA.GREEN.TRANSACTIONS , target MAUL.RED.TRANSACTIONS ;
map YODA.GREEN.TASKS , target MAUL.RED.TASKS ;
start replicat REPLAA
At this moment replicat should be **RUNNING**.
## Create a dedicated extract/replicat for the table in failure
Create a second extract EXTRAB to manage the new tables.
Define extract parameters:
edit params EXTRAB
extract EXTRAB
useridalias JEDIPRD
sourcecatalog YODA
exttrail ./dirdat/ab
purgeoldextracts
checkpointsecs 1
ddl include mapped
warnlongtrans 1h, checkinterval 30m
table GREEN.TRANSACTIONS;
Add, register and start extract:
dblogin useridalias JEDIPRD
add extract EXTRAB, integrated tranlog, begin now
add exttrail ./dirdat/ab, extract EXTRAB
register extract EXTRAB, database container (YODA)
start extract EXTRAB
info extract EXTRAB detail
> Start **distribution path** (aka **PUMP**) if the replicat is running on distant site (Golden Gate deployment)
## Initial load
Note down the current SCN on source database.
SQL> select current_scn from v$database;
CURRENT_SCN
-----------
12234159
On target DB create the table structure for TRANSACTIONS and do the initial load:
SCN=12234159
impdp userid=admin/"Secret00!"@togoria/MAUL network_link=GREEN_AT_YODA logfile=MY:import_03.log remap_schema=GREEN:RED tables=GREEN.TRANSACTIONS TABLE_EXISTS_ACTION=TRUNCATE flashback_scn=$SCN
## New replicat setup
Define extract parameters.
Pay attention to the `filter(@GETENV ('TRANSACTION','CSN')` clause: it must be set to the SCN of the initial Data Pump load.
edit params REPLAB
replicat REPLAB
useridalias MAUL
dboptions enable_instantiation_filtering
discardfile REPLAB.dsc, purge, megabytes 10
map YODA.GREEN.TRANSACTIONS, target MAUL.RED.TRANSACTIONS, filter(@GETENV ('TRANSACTION','CSN') > 12234159);
Add and start new replicat:
add replicat REPLAB, integrated, exttrail ./dirdat/ab
dblogin useridalias SITHPRD
register replicat REPLAB database
start replicat REPLAB
info all
Check if new replicat is running and wait to lag 0.
## Reintegrate table to initial extract/replicat
Now, TRANSACTIONS table is replicated by EXTRAB/REPLAB, but not by intial replication EXTRAA/REPLAA.
Let's reintegrate TRANSACTIONS in intial replication EXTRAA/REPLAA.
Note that TRANSACTIONS was not removed from EXTRAA definition, so all table changes are still recorded in EXTRAA trail files.
Stop the extracts in this **strict order**:
- **first** extract: EXTRAA
- **second** extract: EXTRAB
> It is **mandatory** to stop extracts in this order.
> **The applied SCN on first replicat tables must be less than the SCN on second replicat** in order to allow the first replicat to start at the last applied position in the trail file. This way, the first replicat does not need to be repositioned in the past.
stop EXTRACT EXTRAA
stop EXTRACT EXTRAB
Now stop both replicat also:
stop replicat REPLAA
stop replicat REPLAB
Note down the SCN for each extract and prepare the new params file for the initial replicat.
info extract EXTRAA detail
info extract EXTRAB detail
In my case:
- EXTRAA: SCN=12245651
- EXTRAB: SCN=12245894
> The SCN of EXTRAB should be greater than the SCN of EXTRAA
Update REPLAA replicat parameter file in accordance with the latest SCN applied TRANSACTION table (the SCN of EXTRAB):
edit params REPLAA
replicat REPLAA
useridalias MAUL
dboptions enable_instantiation_filtering
discardfile REPLAA.dsc, purge, megabytes 10
map YODA.GREEN.ORDERS, target MAUL.RED.ORDERS ;
map YODA.GREEN.PRODUCTS, target MAUL.RED.PRODUCTS ;
map YODA.GREEN.USERS, target MAUL.RED.USERS ;
map YODA.GREEN.TASKS, target MAUL.RED.TASKS ;
map YODA.GREEN.TRANSACTIONS, target MAUL.RED.TRANSACTIONS, filter(@GETENV ('TRANSACTION','CSN') > 12245894);
Start first extract/replicat
start extract EXTRAA
start replicat REPLAA
When the lag is zero you can remove filter `filter(@GETENV ('TRANSACTION','CSN')` from REPLAA.
stop replicat REPLAA
edit params REPLAA
replicat REPLAA
useridalias MAUL
dboptions enable_instantiation_filtering
discardfile REPLAA.dsc, purge, megabytes 10
map YODA.GREEN.ORDERS , target MAUL.RED.ORDERS ;
map YODA.GREEN.PRODUCTS , target MAUL.RED.PRODUCTS ;
map YODA.GREEN.USERS , target MAUL.RED.USERS ;
map YODA.GREEN.TASKS , target MAUL.RED.TASKS ;
map YODA.GREEN.TRANSACTIONS , target MAUL.RED.TRANSACTIONS ;
Restart REPLAA replicat:
start replicat REPLAA
Now all tables are integrated in first extract/replicat.
## Remove second extract/replicat
dblogin useridalias JEDIPRD
unregister extract EXTRAB database
delete extract EXTRAB
dblogin useridalias MAUL
unregister replicat REPLAB database
delete replicat REPLAB
Stop and delete **distribution path** (aka **PUMP**) if the replicat is running on distant site (Golden Gate deployment).

View File

@@ -0,0 +1,91 @@
-- Step 1: Stored procedure generating a random DML workload (inserts,
-- updates, deletes) on every replicated table. It is scheduled by
-- JOB_MANAGE_DATA (Step 2) to exercise GoldenGate capture/apply.
CREATE OR REPLACE PROCEDURE manage_data IS
    -- rows to insert per run, per table
    new_products     INTEGER DEFAULT 3;
    new_orders       INTEGER DEFAULT 10;
    new_users        INTEGER DEFAULT 2;
    new_transactions INTEGER DEFAULT 20;
    new_tasks        INTEGER DEFAULT 5;
BEGIN
    -- Inserts: the id column is deliberately omitted -- the trg_*_pk
    -- BEFORE INSERT triggers assign :NEW.id from the sequence. The previous
    -- version also passed seq_*.NEXTVAL in VALUES, which the trigger then
    -- overwrote, consuming two sequence values per row.
    FOR i IN 1..new_products LOOP
        INSERT INTO products (name, category, quantity)
        VALUES (DBMS_RANDOM.STRING('A', 10),
                DBMS_RANDOM.STRING('A', 20),
                TRUNC(DBMS_RANDOM.VALUE(1, 100)));
    END LOOP;
    FOR i IN 1..new_orders LOOP
        INSERT INTO orders (description, status)
        VALUES (DBMS_RANDOM.STRING('A', 50),
                DBMS_RANDOM.STRING('A', 20));
    END LOOP;
    FOR i IN 1..new_users LOOP
        INSERT INTO users (created_at, username, age, location)
        VALUES (SYSDATE,
                DBMS_RANDOM.STRING('A', 15),
                TRUNC(DBMS_RANDOM.VALUE(18, 60)),
                DBMS_RANDOM.STRING('A', 20));
    END LOOP;
    FOR i IN 1..new_transactions LOOP
        INSERT INTO transactions (amount, currency)
        VALUES (ROUND(DBMS_RANDOM.VALUE(1, 10000), 2),
                DBMS_RANDOM.STRING('A', 3));
    END LOOP;
    FOR i IN 1..new_tasks LOOP
        INSERT INTO tasks (status, priority, type, assigned_to)
        VALUES (DBMS_RANDOM.STRING('A', 20),
                TRUNC(DBMS_RANDOM.VALUE(1, 10)),
                DBMS_RANDOM.STRING('A', 20),
                DBMS_RANDOM.STRING('A', 15));
    END LOOP;
    -- Update 2 random rows in each table (random pick via ORDER BY
    -- DBMS_RANDOM.VALUE; FETCH FIRST requires 12c+, databases here are 19c+)
    UPDATE products SET quantity = TRUNC(DBMS_RANDOM.VALUE(1, 200))
    WHERE id IN (SELECT id FROM products ORDER BY DBMS_RANDOM.VALUE FETCH FIRST 2 ROWS ONLY);
    UPDATE orders SET status = DBMS_RANDOM.STRING('A', 20)
    WHERE id IN (SELECT id FROM orders ORDER BY DBMS_RANDOM.VALUE FETCH FIRST 2 ROWS ONLY);
    UPDATE users SET age = TRUNC(DBMS_RANDOM.VALUE(18, 75))
    WHERE id IN (SELECT id FROM users ORDER BY DBMS_RANDOM.VALUE FETCH FIRST 2 ROWS ONLY);
    UPDATE transactions SET amount = ROUND(DBMS_RANDOM.VALUE(1, 5000), 2)
    WHERE id IN (SELECT id FROM transactions ORDER BY DBMS_RANDOM.VALUE FETCH FIRST 2 ROWS ONLY);
    UPDATE tasks SET priority = TRUNC(DBMS_RANDOM.VALUE(1, 10))
    WHERE id IN (SELECT id FROM tasks ORDER BY DBMS_RANDOM.VALUE FETCH FIRST 2 ROWS ONLY);
    -- Delete 1 random row from each table
    DELETE FROM products WHERE id = (SELECT id FROM products ORDER BY DBMS_RANDOM.VALUE FETCH FIRST 1 ROW ONLY);
    DELETE FROM orders WHERE id = (SELECT id FROM orders ORDER BY DBMS_RANDOM.VALUE FETCH FIRST 1 ROW ONLY);
    DELETE FROM users WHERE id = (SELECT id FROM users ORDER BY DBMS_RANDOM.VALUE FETCH FIRST 1 ROW ONLY);
    DELETE FROM transactions WHERE id = (SELECT id FROM transactions ORDER BY DBMS_RANDOM.VALUE FETCH FIRST 1 ROW ONLY);
    DELETE FROM tasks WHERE id = (SELECT id FROM tasks ORDER BY DBMS_RANDOM.VALUE FETCH FIRST 1 ROW ONLY);
    -- One commit for the whole batch so the workload arrives as a single
    -- transaction on the replicat side.
    COMMIT;
END;
/
-- Step 2: Scheduler job running manage_data every 10 seconds, providing a
-- continuous DML stream for the GoldenGate replication tests.
BEGIN
DBMS_SCHEDULER.create_job (
job_name => 'JOB_MANAGE_DATA',
job_type => 'PLSQL_BLOCK',
job_action => 'BEGIN manage_data; END;',
start_date => SYSTIMESTAMP,
-- calendaring syntax: fire every 10 seconds
repeat_interval => 'FREQ=SECONDLY; INTERVAL=10',
-- enabled => TRUE: the job starts running immediately after creation
enabled => TRUE
);
END;
/

74
Golden_Gate/ogg_01.txt Normal file
View File

@@ -0,0 +1,74 @@
https://www.dbi-services.com/blog/setting-up-a-sample-replication-with-goldengate/
# source: 19c database, schema OTTER, NON-CDB //togoria:1521/ANDOPRD
# target: 21c database, schema BEAVER, PDB //bakura:1521/WOMBAT
-- on source DB
create user OTTER identified by "K91@9kLorg1j_7OxV";
grant connect,resource to OTTER;
alter user OTTER quota unlimited on USERS;
-- on target DB
create user BEAVER identified by "Versq99#LerB009aX";
grant connect,resource to BEAVER;
alter user BEAVER quota unlimited on USERS;
# on BOTH databases
###################
# check if ARCHIVELOG mode is ON
archive log list;
# activate integrated OGG replication
alter system set enable_goldengate_replication=TRUE scope=both sid='*';
# put databases in FORCE LOGGING mode
alter database force logging;
# add supplemental log
alter database add supplemental log data;
# create a GoldenGate admin user
create user OGGADMIN identified by "eXtpam!ZarghOzVe81p@1";
grant create session to OGGADMIN;
grant select any dictionary to OGGADMIN;
exec DBMS_GOLDENGATE_AUTH.GRANT_ADMIN_PRIVILEGE ('OGGADMIN');
grant flashback any table to OGGADMIN;
# test GoldenGate admin user connections
sqlplus /nolog
connect OGGADMIN/"eXtpam!ZarghOzVe81p@1"@//togoria:1521/ANDOPRD
connect OGGADMIN/"eXtpam!ZarghOzVe81p@1"@//bakura:1521/WOMBAT
# create tables to replicate on source DB
create table OTTER.T1(d date);
ggsci
create wallet
add credentialstore
alter credentialstore add user OGGADMIN@//togoria:1521/ANDOPRD password "eXtpam!ZarghOzVe81p@1" alias ANDOPRD
info credentialstore
dblogin useridalias ANDOPRD
add trandata OTTER.T1
# cleanup
#########
# on source DB
drop user OTTER cascade;
drop user OGGADMIN cascade;
# on target DB
drop user BEAVER cascade;
drop user OGGADMIN cascade;

128
Golden_Gate/ogg_02.txt Normal file
View File

@@ -0,0 +1,128 @@
alias gg='rlwrap /app/oracle/product/ogg21/ggsci'
create user OGGADMIN identified by "eXtpam!ZarghOzVe81p@1";
# maybe too much
grant DBA to OGGADMIN;
add credentialstore
info credentialstore domain admin
alter credentialstore add user OGGADMIN@//togoria:1521/ANDOPRD password "eXtpam!ZarghOzVe81p@1" alias ANDOPRD domain admin
dblogin useridalias ANDOPRD domain admin
list tables OTTER.*
# delete trandata OTTER.*
add trandata OTTER.*
Edit params ./GLOBALS
#-->
GGSCHEMA OGGADMIN
#<--
edit params myextr1
#-->
EXTRACT myextr1
USERID OGGADMIN@//togoria:1521/ANDOPRD, PASSWORD "eXtpam!ZarghOzVe81p@1"
EXTTRAIL ./dirdat/ex
CHECKPOINTSECS 1
TABLE OTTER.*;
#<--
ADD EXTRACT myextr1, TRANLOG, BEGIN now
REGISTER EXTRACT myextr1, DATABASE
ADD EXTTRAIL ./dirdat/ex, EXTRACT myextr1
START EXTRACT myextr1
info myextr1
edit param mypump1
#-->
EXTRACT mypump1
PASSTHRU
RMTHOST bakura, MGRPORT 7809
RMTTRAIL ./dirdat/RT
CHECKPOINTSECS 1
TABLE OTTER.*;
#<--
ADD EXTRACT mypump1, EXTTRAILSOURCE ./dirdat/ex
Add RMTTRAIL ./dirdat/rt, EXTRACT mypump1
START EXTRACT mypump1
info mypump1
add checkpointtable OGGADMIN.checkpointtable
add credentialstore
info credentialstore domain admin
alter credentialstore add user OGGADMIN@//bakura:1521/EWOKPRD password "eXtpam!ZarghOzVe81p@1" alias EWOKPRD domain admin
dblogin useridalias EWOKPRD domain admin
add checkpointtable OGGADMIN.checkpointtable
edit params myrepl1
#-->
REPLICAT myrepl1
USERID OGGADMIN@//bakura:1521/EWOKPRD, PASSWORD "eXtpam!ZarghOzVe81p@1"
DISCARDFILE ./dirdsc/myrepl1.dsc, PURGE
ASSUMETARGETDEFS
MAP OTTER.*, TARGET OTTER.*;
#<--
add replicat myrepl1, EXTTRAIL ./dirdat/RT, checkpointtable OGGADMIN.checkpointtable
start MYREPL1
create spfile='/app/oracle/base/admin/EWOKPRD/spfile/spfileEWOKPRD.ora' from pfile='/mnt/yavin4/tmp/_oracle_/tmp/ANDO.txt';
# create a static listener to connect as sysdba in NOMOUNT state
oracle@bakura[EWOKPRD]:/mnt/yavin4/tmp/_oracle_/tmp$ cat listener.ora
MYLSNR =
(DESCRIPTION_LIST =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = bakura)(PORT = 1600))
)
)
SID_LIST_MYLSNR =
(SID_LIST =
(SID_DESC =
(GLOBAL_DBNAME = EWOKPRD_STATIC)
(SID_NAME = EWOKPRD)
(ORACLE_HOME = /app/oracle/product/19)
)
)
export TNS_ADMIN=/mnt/yavin4/tmp/_oracle_/tmp
lsnrctl start MYLSNR
lsnrctl status MYLSNR
connect sys/"Secret00!"@//bakura:1600/EWOKPRD_STATIC as sysdba
connect sys/"Secret00!"@//togoria:1521/ANDOPRD as sysdba
rman target=sys/"Secret00!"@//togoria:1521/ANDOPRD auxiliary=sys/"Secret00!"@//bakura:1600/EWOKPRD_STATIC
run {
allocate channel pri1 device type DISK;
allocate channel pri2 device type DISK;
allocate channel pri3 device type DISK;
allocate channel pri4 device type DISK;
allocate auxiliary channel aux1 device type DISK;
allocate auxiliary channel aux2 device type DISK;
allocate auxiliary channel aux3 device type DISK;
allocate auxiliary channel aux4 device type DISK;
duplicate target database to 'EWOK'
from active database
using compressed backupset section size 1G;
}

147
Golden_Gate/ogg_03.txt Normal file
View File

@@ -0,0 +1,147 @@
-- https://www.dbi-services.com/blog/performing-an-initial-load-with-goldengate-1-file-to-replicat/
-- https://www.dbi-services.com/blog/performing-an-initial-load-with-goldengate-2-expdpimpdp/
Source DB: ANDOPRD@togoria
Target DB: EWOKPRD@bakura
alias gg='rlwrap /app/oracle/product/ogg21/ggsci'
# install HR schema on source database
@install.sql
# install HR schema on target database, disable constraints and delete all data
@install.sql
connect / as sysdba
-- Reset the HR demo schema on the TARGET before the initial load:
--   1) disable every non-primary-key constraint (FKs, checks, uniques)
--      owned by HR so rows can be deleted in any order;
--   2) empty every HR table.
-- NOTE(review): primary keys stay enabled; FKs from OTHER schemas
-- pointing at HR (if any) are not handled here -- confirm none exist.
declare
lv_statement varchar2(2000);
begin
-- Join to dba_tables so only constraints on real HR tables are touched.
for r in ( select c.CONSTRAINT_NAME, c.TABLE_NAME
from dba_constraints c
, dba_tables t
where c.owner = 'HR'
and t.table_name = c.table_name
and t.owner = 'HR'
and c.constraint_type != 'P'
)
loop
lv_statement := 'alter table hr.'||r.TABLE_NAME||' disable constraint '||r.CONSTRAINT_NAME;
execute immediate lv_statement;
end loop;
-- With referential constraints disabled, delete order no longer matters.
for r in ( select table_name
from dba_tables
where owner = 'HR'
)
loop
execute immediate 'delete hr.'||r.table_name;
end loop;
end;
/
select count(*) from hr.employees;
select count(*) from hr.jobs;
# create OGGADMIN user on both databases
create user OGGADMIN identified by "Chan8em11fUwant!";
grant dba to OGGADMIN;
# on source machine
add credentialstore
info credentialstore domain admin
alter credentialstore add user OGGADMIN@//togoria:1521/ANDOPRD password "Chan8em11fUwant!" alias ANDOPRD domain admin
info credentialstore domain admin
dblogin useridalias ANDOPRD domain admin
# on target machine
add credentialstore
info credentialstore domain admin
alter credentialstore add user OGGADMIN@//bakura:1521/EWOKPRD password "Chan8em11fUwant!" alias EWOKPRD domain admin
info credentialstore domain admin
dblogin useridalias EWOKPRD domain admin
# on source machine
dblogin useridalias ANDOPRD domain admin
list tables HR.*
add trandata HR.*
# on source, in order to catch transactions during the initial load, we will create an extract for Change Data Capture
edit params extrcdc1
-------------------------------->
EXTRACT extrcdc1
useridalias ANDOPRD domain admin
EXTTRAIL ./dirdat/gg
LOGALLSUPCOLS
UPDATERECORDFORMAT compact
TABLE HR.*;
TABLEEXCLUDE HR.EMP_DETAILS_VIEW;
<--------------------------------
dblogin useridalias ANDOPRD domain admin
register extract extrcdc1 database
add extract extrcdc1, integrated tranlog, begin now
EXTRACT added.
add extract extrcdc1, integrated tranlog, begin now
add exttrail ./dirdat/gg, extract extrcdc1, megabytes 5
# on source, configure the datapump
edit params dppump1
-------------------------------->
EXTRACT dppump1
PASSTHRU
RMTHOST bakura, MGRPORT 7809
RMTTRAIL ./dirdat/jj
TABLE HR.*;
TABLEEXCLUDE HR.EMP_DETAILS_VIEW;
<--------------------------------
add extract dppump1, exttrailsource ./dirdat/gg
add rmttrail ./dirdat/jj, extract dppump1, megabytes 5
# on source, start the extracts: CDC capture and datapump
start extract dppump1
start extract extrcdc1
info *
# on target, configure replicat for CDC
edit params replcdd
-------------------------------->
REPLICAT replcdd
ASSUMETARGETDEFS
DISCARDFILE ./dirrpt/replccd.dsc, purge
useridalias EWOKPRD domain admin
MAP HR.*, TARGET HR.*;
<--------------------------------
dblogin useridalias EWOKPRD domain admin
add replicat replcdd, integrated, exttrail ./dirdat/jj
# We will NOT START the replicat right now, as we want to perform the initial load first
# Note down the current scn of the source database
SQL> select current_scn from v$database;
CURRENT_SCN
-----------
3968490
# on destination, import the HR schema
create public database link ANDOPRD connect to OGGADMIN identified by "Chan8em11fUwant!" using '//togoria:1521/ANDOPRD';
select * from DUAL@ANDOPRD;
impdp userid=OGGADMIN/"Chan8em11fUwant!"@//bakura:1521/EWOKPRD logfile=MY:HR.log network_link=ANDOPRD schemas=HR flashback_scn=3968490
start replicat replcdd, aftercsn 3968490

416
Golden_Gate/ogg_04.txt Normal file
View File

@@ -0,0 +1,416 @@
# setup source schema
#####################
create user WOMBAT identified by "NDbGvewNHVj8@#2FFGfz!De";
grant connect, resource to WOMBAT;
alter user WOMBAT quota unlimited on USERS;
connect WOMBAT/"NDbGvewNHVj8@#2FFGfz!De";
-- Recreate the demo tables from scratch: drop any previous copies first.
-- The DROPs fail harmlessly (ORA-00942) on a clean database.
-- Fix: JOB was created below but never dropped, so re-running this
-- script failed with ORA-00955 on JOB; drop it like the others.
drop table JOB purge;
drop table T0 purge;
drop table T1 purge;
drop table T2 purge;
drop table T3 purge;
-- JOB: one heartbeat row per iteration of the "living data" generator.
create table JOB (
id NUMBER GENERATED ALWAYS AS IDENTITY,
d DATE not null
);
alter table JOB add constraint JOB_PK_ID primary key (ID);
-- T0: identity PK plus payload columns; range-partitioned by month on d.
-- Interval partitioning auto-creates one partition per month past p0.
create table T0 (
id NUMBER GENERATED ALWAYS AS IDENTITY,
d DATE not null,
c VARCHAR2(20),
n NUMBER
)
partition by range (d)
interval (interval '1' MONTH) (
partition p0 values less than (DATE'2000-01-01')
)
;
alter table T0 add constraint T0_PK_ID primary key (ID);
-- T1: monthly interval-partitioned payload table.
-- NOTE(review): no primary/unique key -- presumably GoldenGate's
-- ADD TRANDATA will fall back to logging all columns for row
-- identification; confirm this is acceptable.
create table T1 (
d DATE not null,
c VARCHAR2(10),
n1 NUMBER,
n2 NUMBER
)
partition by range (d)
interval (interval '1' MONTH) (
partition p0 values less than (DATE'2000-01-01')
)
;
-- T2: numeric payload only; monthly interval partitions, no key columns.
create table T2 (
d DATE not null,
n1 NUMBER,
n2 NUMBER,
n3 NUMBER
)
partition by range (d)
interval (interval '1' MONTH) (
partition p0 values less than (DATE'2000-01-01')
)
;
-- T3: mixed numeric/string payload; monthly interval partitions, no key.
-- c1..c3 are VARCHAR2(10) -- generators must not exceed 10 characters.
create table T3 (
d DATE not null,
n NUMBER,
c1 VARCHAR2(10),
c2 VARCHAR2(10),
c3 VARCHAR2(10)
)
partition by range (d)
interval (interval '1' MONTH) (
partition p0 values less than (DATE'2000-01-01')
)
;
CREATE OR REPLACE FUNCTION random_date(
p_from IN DATE,
p_to IN DATE
) RETURN DATE
IS
lv_offset NUMBER;
BEGIN
-- Scale a uniform [0,1) draw from DBMS_RANDOM onto the requested span.
lv_offset := DBMS_RANDOM.VALUE() * (p_to - p_from);
RETURN p_from + lv_offset;
END random_date;
/
CREATE OR REPLACE FUNCTION random_string(
maxsize IN NUMBER
) RETURN VARCHAR2
IS
lv_value VARCHAR2(4000);
BEGIN
-- Option 'x': random string of uppercase alpha-numeric characters.
lv_value := dbms_random.string('x', maxsize);
RETURN lv_value;
END random_string;
/
CREATE OR REPLACE FUNCTION random_integer(
maxvalue IN NUMBER
) RETURN NUMBER
IS
lv_draw NUMBER;
BEGIN
-- DBMS_RANDOM.VALUE(low, high) is uniform on [low, high); after TRUNC
-- the result lies in 1 .. maxvalue - 1.
-- NOTE(review): the upper bound is exclusive -- maxvalue itself is
-- never returned; confirm that is the intended contract.
lv_draw := dbms_random.value(1, maxvalue);
RETURN trunc(lv_draw);
END random_integer;
/
# add some data into tables
###########################
set timing ON
-- Bulk-load T0 with imax random rows; fixed seed for reproducibility.
-- Fixes: dropped the /*+ APPEND */ hint -- it is silently ignored on
-- single-row INSERT ... VALUES (direct-path VALUES needs APPEND_VALUES,
-- which would raise ORA-12838 inside this loop) -- and removed the
-- unused local i (the FOR loop declares its own index).
DECLARE
imax NUMBER default 100000;
begin
dbms_random.seed (val => 0);
for i in 1 .. imax loop
insert into T0 (d,c,n) values (random_date(DATE'2000-01-01',SYSDATE),random_string(20),random_integer(999999999));
end loop;
commit;
end;
/
-- Bulk-load T1 with imax random rows; fixed seed for reproducibility.
-- Fixes: dropped the ignored /*+ APPEND */ hint (single-row VALUES
-- inserts are never direct-path with it) and the unused local i.
DECLARE
imax NUMBER default 100000;
begin
dbms_random.seed (val => 0);
for i in 1 .. imax loop
insert into T1 (d,c,n1,n2) values (random_date(DATE'2000-01-01',SYSDATE),random_string(10),random_integer(999999999),random_integer(999999999));
end loop;
commit;
end;
/
-- Bulk-load T2 with imax random rows; fixed seed for reproducibility.
-- Fixes: dropped the ignored /*+ APPEND */ hint (single-row VALUES
-- inserts are never direct-path with it) and the unused local i.
DECLARE
imax NUMBER default 100000;
begin
dbms_random.seed (val => 0);
for i in 1 .. imax loop
insert into T2 (d,n1,n2,n3) values (random_date(DATE'2000-01-01',SYSDATE),random_integer(999999999),random_integer(999999999),random_integer(999999999));
end loop;
commit;
end;
/
-- Bulk-load T3 with imax random rows; fixed seed for reproducibility.
-- Fixes: dropped the ignored /*+ APPEND */ hint (single-row VALUES
-- inserts are never direct-path with it) and the unused local i.
DECLARE
imax NUMBER default 100000;
begin
dbms_random.seed (val => 0);
for i in 1 .. imax loop
insert into T3 (d,n,c1,c2,c3) values (random_date(DATE'2000-01-01',SYSDATE),random_integer(999999999),random_string(10),random_string(10),random_string(10));
end loop;
commit;
end;
/
# run this PL/SQL block to generate living data
###############################################
connect WOMBAT/"NDbGvewNHVj8@#2FFGfz!De";
-- Endless "living data" generator: every 5 seconds, touch all tables so
-- the GoldenGate extract always has fresh changes to capture.
-- Fixes:
--   * seed ONCE before the loop -- the original re-seeded with val=>0 on
--     every iteration, so each pass produced the exact same "random"
--     values (same i, same strings, same dates), defeating the purpose;
--   * T1.c and T3.c1 are VARCHAR2(10): random_string(20) raised
--     ORA-12899 (value too large), so generate 10 characters there.
DECLARE
i number;
begin
dbms_random.seed (val => 0);
loop
sys.dbms_session.sleep(5);
i := random_integer(999999999);
insert into JOB (d) values (sysdate);
update T0 set c=random_string(20) where n=i;
update T1 set c=random_string(10) where n2 between i-1000 and i+1000;
update T2 set d=random_date(DATE'2000-01-01',SYSDATE) where n1 between i-1000 and i+1000;
update T3 set c1=random_string(10),d=random_date(DATE'2000-01-01',SYSDATE) where n between i-1000 and i+1000;
insert into T0 (d,c,n) values (random_date(DATE'2000-01-01',SYSDATE),random_string(20),random_integer(999999999));
insert into T1 (d,c,n1,n2) values (random_date(DATE'2000-01-01',SYSDATE),random_string(10),random_integer(999999999),random_integer(999999999));
insert into T2 (d,n1,n2,n3) values (random_date(DATE'2000-01-01',SYSDATE),random_integer(999999999),random_integer(999999999),random_integer(999999999));
insert into T3 (d,c1,c2,c3) values (random_date(DATE'2000-01-01',SYSDATE),random_string(10),random_string(10),random_string(10));
commit;
exit when 1=0; -- intentionally never true: run until the session is killed
end loop;
end;
/
## Golden Gate setup
####################
# on source & destination
alias gg='rlwrap /app/oracle/product/ogg21/ggsci'
create user OGGADMIN identified by "eXtpam!ZarghOzVe81p@1";
# maybe too much
grant DBA to OGGADMIN;
Edit params ./GLOBALS
#-->
GGSCHEMA OGGADMIN
#<--
# on source
add credentialstore
info credentialstore domain admin
alter credentialstore add user OGGADMIN@//togoria:1521/ANDOPRD password "eXtpam!ZarghOzVe81p@1" alias ANDOPRD domain admin
dblogin useridalias ANDOPRD domain admin
# on destination
add credentialstore
info credentialstore domain admin
alter credentialstore add user OGGADMIN@//bakura:1521/EWOKPRD password "Chan8em11fUwant!" alias EWOKPRD domain admin
info credentialstore domain admin
dblogin useridalias EWOKPRD domain admin
# setup replication only for tables T0, T1 and T2
#################################################
# on source machine
dblogin useridalias ANDOPRD domain admin
list tables WOMBAT.*
add trandata WOMBAT.T0
add trandata WOMBAT.T1
add trandata WOMBAT.T2
edit params extr_w1
-------------------------------->
EXTRACT extr_w1
useridalias ANDOPRD domain admin
EXTTRAIL ./dirdat/w1
LOGALLSUPCOLS
UPDATERECORDFORMAT compact
table WOMBAT.T0;
table WOMBAT.T1;
table WOMBAT.T2;
<--------------------------------
dblogin useridalias ANDOPRD domain admin
register extract extr_w1 database
add extract extr_w1, integrated tranlog, begin now
add exttrail ./dirdat/w1, extract extr_w1, megabytes 5
start extr_w1
info extr_w1
# on source, configure the datapump
edit params dpump_w1
-------------------------------->
EXTRACT dpump_w1
PASSTHRU
RMTHOST bakura, MGRPORT 7809
RMTTRAIL ./dirdat/w1
table WOMBAT.T0;
table WOMBAT.T1;
table WOMBAT.T2;
<--------------------------------
add extract dpump_w1, exttrailsource ./dirdat/w1
add rmttrail ./dirdat/w1, extract dpump_w1, megabytes 5
start dpump_w1
info dpump_w1
# on target, set up the replicat but do not start it yet
edit params repl_w1
-------------------------------->
REPLICAT repl_w1
ASSUMETARGETDEFS
DISCARDFILE ./dirrpt/repl_w1.dsc, purge
useridalias EWOKPRD domain admin
MAP WOMBAT.T0, TARGET OTTER.T0;
MAP WOMBAT.T1, TARGET OTTER.T1;
MAP WOMBAT.T2, TARGET OTTER.T2;
<--------------------------------
dblogin useridalias EWOKPRD domain admin
add replicat repl_w1, integrated, exttrail ./dirdat/w1
# perform the intial LOAD
#########################
# Note down the current scn of the source database
SQL> select current_scn from v$database;
CURRENT_SCN
-----------
4531616
# on destination, import tables
create public database link ANDOPRD connect to OGGADMIN identified by "Chan8em11fUwant!" using '//togoria:1521/ANDOPRD';
select * from DUAL@ANDOPRD;
# create the target schema using the same DDL definition as on the source database
create user OTTER identified by "50DbGvewN00K@@)2FFGfzKg";
grant connect, resource to OTTER;
alter user OTTER quota unlimited on USERS;
impdp userid=OGGADMIN/"Chan8em11fUwant!"@//bakura:1521/EWOKPRD logfile=MY:WOMBAT_01.log network_link=ANDOPRD tables=WOMBAT.T0,WOMBAT.T1,WOMBAT.T2 flashback_scn=4531616 remap_schema=WOMBAT:OTTER
start repl_w1, aftercsn 4531616
# once the lag is caught up, restart the replicat
stop repl_w1
start repl_w1
info repl_w1
# add 2 tables to SYNC
######################
# on source, add 2 tables to extract & datapump
stop dpump_w1
stop extr_w1
# add new tables in extract & datapump parameter files
edit params extr_w1
-------------------------------->
EXTRACT extr_w1
useridalias ANDOPRD domain admin
EXTTRAIL ./dirdat/w1
LOGALLSUPCOLS
UPDATERECORDFORMAT compact
table WOMBAT.T0;
table WOMBAT.T1;
table WOMBAT.T2;
table WOMBAT.JOB;
table WOMBAT.T3;
<--------------------------------
# add trandata for new tables
dblogin useridalias ANDOPRD domain admin
list tables WOMBAT.*
add trandata WOMBAT.JOB
add trandata WOMBAT.T3
start extr_w1
info extr_w1
edit params dpump_w1
-------------------------------->
EXTRACT dpump_w1
PASSTHRU
RMTHOST bakura, MGRPORT 7809
RMTTRAIL ./dirdat/w1
table WOMBAT.T0;
table WOMBAT.T1;
table WOMBAT.T2;
table WOMBAT.JOB;
table WOMBAT.T3;
<--------------------------------
start dpump_w1
info dpump_w1
# once extract & datapump are up and running, we will proceed with the initial load of the new tables using expdp/impdp
# Note down the current scn of the source database
SQL> select current_scn from v$database;
CURRENT_SCN
-----------
4675686
impdp userid=OGGADMIN/"Chan8em11fUwant!"@//bakura:1521/EWOKPRD logfile=MY:WOMBAT_02.log network_link=ANDOPRD tables=WOMBAT.JOB,WOMBAT.T3 flashback_scn=4675686 remap_schema=WOMBAT:OTTER
# on target, stop replicat, add new tables and start FROM THE GOOD SCN ON NEW TABLES
stop repl_w1
edit params repl_w1
-------------------------------->
REPLICAT repl_w1
ASSUMETARGETDEFS
DISCARDFILE ./dirrpt/repl_w1.dsc, purge
useridalias EWOKPRD domain admin
MAP WOMBAT.T0, TARGET OTTER.T0;
MAP WOMBAT.T1, TARGET OTTER.T1;
MAP WOMBAT.T2, TARGET OTTER.T2;
MAP WOMBAT.JOB, TARGET OTTER.JOB, filter(@GETENV ('TRANSACTION','CSN') > 4633243);
MAP WOMBAT.T3, TARGET OTTER.T3, filter(@GETENV ('TRANSACTION','CSN') > 4633243);
<--------------------------------
start repl_w1
info repl_w1
# once the lag is caught up, remove the CSN filter clauses from the replicat and restart
stop repl_w1
edit params repl_w1
-------------------------------->
REPLICAT repl_w1
ASSUMETARGETDEFS
DISCARDFILE ./dirrpt/repl_w1.dsc, purge
useridalias EWOKPRD domain admin
MAP WOMBAT.T0, TARGET OTTER.T0;
MAP WOMBAT.T1, TARGET OTTER.T1;
MAP WOMBAT.T2, TARGET OTTER.T2;
MAP WOMBAT.JOB, TARGET OTTER.JOB;
MAP WOMBAT.T3, TARGET OTTER.T3;
<--------------------------------
start repl_w1
info repl_w1

141
Golden_Gate/setup.md Normal file
View File

@@ -0,0 +1,141 @@
## Articles
https://www.dbi-services.com/blog/how-to-create-an-oracle-goldengate-extract-in-multitenant/
http://blog.data-alchemy.org/posts/oracle-goldengate-pluggable/
## Topology
Databases:
- source: CDB: JEDIPRD@wayland, PDB: YODA
- target: CDB: SITHPRD@togoria, PDB: MAUL
## Databases setup for Golden Gate
In **both** databases, create Golden Gate admin user in `CDB$ROOT`:
create user c##oggadmin identified by "Secret00!";
alter user c##oggadmin quota unlimited on USERS;
grant create session, connect,resource,alter system, select any dictionary, flashback any table to c##oggadmin container=all;
exec dbms_goldengate_auth.grant_admin_privilege(grantee => 'c##oggadmin',container=>'all');
alter user c##oggadmin set container_data=all container=current;
grant alter any table to c##oggadmin container=ALL;
alter system set enable_goldengate_replication=true scope=both;
alter database force logging;
alter database add supplemental log data;
select supplemental_log_data_min, force_logging from v$database;
> On **target** database I had to add extra grants:
grant select any table to c##oggadmin container=ALL;
grant insert any table to c##oggadmin container=ALL;
grant update any table to c##oggadmin container=ALL;
grant delete any table to c##oggadmin container=ALL;
Create schemas for replicated tables on source and target PDB:
alter session set container=YODA;
create user GREEN identified by "Secret00!";
alter user GREEN quota unlimited on USERS;
grant connect,resource to GREEN;
connect GREEN/"Secret00!"@wayland/YODA;
alter session set container=MAUL;
create user RED identified by "Secret00!";
alter user RED quota unlimited on USERS;
grant connect,resource to RED;
connect RED/"Secret00!"@togoria/MAUL;
## Setup `exegol` Golden Gate deployment
> My Root CA (added to the host truststore) was not recognized by `adminclient`, resulting in an OGG-12982 error, while `curl` worked perfectly.
Solution: set the `OGG_CLIENT_TLS_CAPATH` environment variable to my root CA certificate before using `adminclient`
export OGG_CLIENT_TLS_CAPATH=/etc/pki/ca-trust/source/anchors/rootCA.pem
Connect to the deployment with `adminclient`:
adminclient
connect https://exegol.swgalaxy:2000 deployment ogg_exegol_deploy as OGGADMIN password "Secret00!"
Optionally store credentials to connect to the deployment:
add credentials admin user OGGADMIN password "Secret00!"
Now we can hide the password when connecting to the deployment:
connect https://exegol.swgalaxy:2000 deployment ogg_exegol_deploy as admin
Add credentialstore entries for the database connections:
create wallet
add credentialstore
alter credentialstore add user c##oggadmin@wayland/JEDIPRD password "Secret00!" alias JEDIPRD
alter credentialstore add user c##oggadmin@wayland/YODA password "Secret00!" alias YODA
info credentialstore
Test database connections:
dblogin useridalias JEDIPRD
dblogin useridalias YODA
To delete a user from credential store:
alter credentialstore delete user JEDIPRD
> IMPORTANT: in a database **MULTITENANT** architecture, Golden Gate is working at `CDB$ROOT` level.
Create the checkpoint table:
dblogin useridalias JEDIPRD
add checkpointtable YODA.c##oggadmin.checkpt
Set **global** parameters:
edit GLOBALS
Put:
ggschema c##oggadmin
checkpointtable YODA.c##oggadmin.checkpt
## Setup `helska` Golden Gate deployment
adminclient
connect https://helska.swgalaxy:2000 deployment ogg_helska_deploy as OGGADMIN password "Secret00!"
Optionally store credentials to connect to the deployment:
add credentials admin user OGGADMIN password "Secret00!"
Now we can hide the password when connecting to the deployment:
connect https://helska.swgalaxy:2000 deployment ogg_helska_deploy as admin
Add credentialstore entries for the database connections:
alter credentialstore add user c##oggadmin@togoria/SITHPRD password "Secret00!" alias SITHPRD
alter credentialstore add user c##oggadmin@togoria/MAUL password "Secret00!" alias MAUL
info credentialstore
Test database connections:
dblogin useridalias SITHPRD
dblogin useridalias MAUL
Create the checkpoint table:
dblogin useridalias SITHPRD
add checkpointtable MAUL.c##oggadmin.checkpt
Set **global** parameters:
edit GLOBALS
Put:
ggschema c##oggadmin
checkpointtable MAUL.c##oggadmin.checkpt