Files
oracle/vg/scratchpad.sql
2026-03-12 21:23:47 +01:00

2992 lines
122 KiB
MySQL
Raw Permalink Blame History

# print leading zero in 2 digit numbers in bash
# Variant 1: GNU seq with a printf-style width format; emits 01..15
for i in $(seq -f "%02g" 1 15)
do
echo $i
done
# Variant 2: C-style arithmetic for loop with printf zero-padding; emits 01..17
for (( i=1; i<=17 ; i++ ));
do
printf '%02d\n' $i;
done
----------------------------------------
-- Switch to Pluggable Database (PDB)
----------------------------------------
SQL> ALTER SESSION SET container = pdb1;
SQL> ALTER SESSION SET container = cdb$root;
-- Per-tablespace physical I/O over the last v$filemetric interval,
-- joined to v$datafile for the block size and v$tablespace for the name.
SELECT t.name
, min(m.begin_time) begin_time
, max(m.end_time) end_time
, SUM(m.physical_reads) physical_reads
, ROUND(SUM(m.physical_block_reads * d.BLOCK_SIZE)/power(1024,2)) physical_reads_mb
, SUM(m.physical_writes) physical_writes
, ROUND(SUM(m.physical_block_writes * d.BLOCK_SIZE)/power(1024,2)) physical_writes_mb
-- NOTE(review): V$FILEMETRIC average_*_time is documented in hundredths of a
-- second, so ms would be *10 rather than /10 — confirm the intended unit.
, MAX(m.average_read_time)/10 average_read_time_ms
, MAX(m.average_write_time)/10 average_write_time_ms
FROM v$filemetric m
JOIN v$datafile d ON d.file# = m.file_id
JOIN v$tablespace t ON d.TS# = t.ts#
GROUP BY t.name
-- Oracle does not allow arithmetic on SELECT-list aliases in ORDER BY
-- (ORA-00904), so the combined-MB sort repeats the aggregate expressions.
ORDER BY ROUND(SUM(m.physical_block_reads * d.BLOCK_SIZE)/power(1024,2))
       + ROUND(SUM(m.physical_block_writes * d.BLOCK_SIZE)/power(1024,2)) desc
--ORDER BY physical_writes_mb desc
--ORDER BY physical_writes desc
--ORDER BY physical_reads_mb desc
--ORDER BY physical_reads desc
;
@session_active % 30 ""
@sortusage
@sortsegment
@top_session_by_cpu % 30
@awr_report_rac
@awr_report_rac "2015-09-23 00:00:00" "2015-03-01 00:02:00" html
@top_session_by_all % 20 "redo size" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' "
----------------------------------------
-- DBA Registry
----------------------------------------
set lines 100
column comp_id format a20
column comp_name format a50
column status format a20
select comp_id, comp_name, status from dba_registry order by 1;
# print leading zero in 2 digit numbers in bash
# Variant 1: GNU seq with a printf-style width format; emits 01..15
for i in $(seq -f "%02g" 1 15)
do
echo $i
done
# Variant 2: C-style arithmetic for loop with printf zero-padding; emits 01..17
for (( i=1; i<=17 ; i++ ));
do
printf '%02d\n' $i;
done
----------------------------------------
-- Switch to Pluggable Database (PDB)
----------------------------------------
SQL> ALTER SESSION SET container = pdb1;
SQL> ALTER SESSION SET container = cdb$root;
-- Per-tablespace physical I/O over the last v$filemetric interval,
-- joined to v$datafile for the block size and v$tablespace for the name.
SELECT t.name
, min(m.begin_time) begin_time
, max(m.end_time) end_time
, SUM(m.physical_reads) physical_reads
, ROUND(SUM(m.physical_block_reads * d.BLOCK_SIZE)/power(1024,2)) physical_reads_mb
, SUM(m.physical_writes) physical_writes
, ROUND(SUM(m.physical_block_writes * d.BLOCK_SIZE)/power(1024,2)) physical_writes_mb
-- NOTE(review): V$FILEMETRIC average_*_time is documented in hundredths of a
-- second, so ms would be *10 rather than /10 — confirm the intended unit.
, MAX(m.average_read_time)/10 average_read_time_ms
, MAX(m.average_write_time)/10 average_write_time_ms
FROM v$filemetric m
JOIN v$datafile d ON d.file# = m.file_id
JOIN v$tablespace t ON d.TS# = t.ts#
GROUP BY t.name
-- Oracle does not allow arithmetic on SELECT-list aliases in ORDER BY
-- (ORA-00904), so the combined-MB sort repeats the aggregate expressions.
ORDER BY ROUND(SUM(m.physical_block_reads * d.BLOCK_SIZE)/power(1024,2))
       + ROUND(SUM(m.physical_block_writes * d.BLOCK_SIZE)/power(1024,2)) desc
--ORDER BY physical_writes_mb desc
--ORDER BY physical_writes desc
--ORDER BY physical_reads_mb desc
--ORDER BY physical_reads desc
;
-- ASH Queries
--Long running queries from ASH
@ash_sid_sqlids % % % 5 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:01:00' and ash.user_id = (select user_id from dba_users where username = upper('kelting')) "
@ash_sid_sqlids % % % 5 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:05:00' and ash.service_hash = (select name_hash from dba_services where name = 'trafvaluationsse30')"
@ash_sid_sqlids % % % 5 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:20:00' "
@ash_sid_sqlids % % % 24 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:00:15' and ash.user_id = 143 "
@ash_sid_sqlids % % % 24 % "and trunc(ash.px_flags / 2097152) > 10"
@ash_sid_sqlids % % % 1 % " and (ash.sample_time - ash.sql_exec_start) > '+0 00:00:20' and ash.force_matching_signature = '8251690068665765049' "
-- ASH Queries (AWR)
--Long running queries from ASH (AWR)
select name_hash from dba_services where name = 'pr01pimi_sp_con'; -- 2950871375
select name_hash from dba_services where name = 'pr01pimi_job_con'; -- 815460948
select name_hash from dba_services where name = 'pr01pimi_ace_con'; -- 3379930795
select name_hash from dba_services where name = 'pr01pimi_sofia_con'; -- 3657673531
select user_id, username from dba_users where username like upper('%username%');
@awr_sid_sqlids % % % 24 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:10:00' and ash.user_id = 6089 "
@awr_sid_sqlids % % % 5 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:01:00' and ash.service_hash = 2950871375 " "and ash.sample_time between to_timestamp('31-MAY-16 19:00:00','DD-MON-YY HH24:MI:SS') and to_timestamp('31-MAY-16 20:00:00','DD-MON-YY HH24:MI:SS')"
@awr_sid_sqlids % % % 24*2 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:01:00' and ash.user_id = 3461" "and ash.sample_time between to_timestamp('13-SEP-15 03:13:00','DD-MON-YY HH24:MI:SS') and to_timestamp('12-SEP-15 09:12:00','DD-MON-YY HH24:MI:SS')"
-- ASH Temp Usage
@ash_sid_sqlids % % % 24 % "" "" " having MAX(NVL(ash.temp_space_allocated,0))/power(1024,3) > 1 " -- Temp usage > 1GB
@ash_sid_details % % % 24 "and ash.temp_space_allocated/power(1024,3) > 1 " -- Temp usage > 1GB
@ash_sid_sqlids % % % 24 % "and ash.user_id IN (select user_id from dba_users where temporary_tablespace ='TRAF_TITAN_TEMP')" "" " having SUM(ash.temp_space_allocated)/power(1024,3) > 1 " -- Temp usage > 1GB
@ash_sid_details % % % 1 " and ash.temp_space_allocated/power(1024,3) > 1 " -- Temp usage > 1GB
@awr_sid_sqlids % % % 24*1 % "" "" " having MAX(NVL(ash.temp_space_allocated,0))/power(1024,3) > 10 " -- Temp usage > 1GB
@awr_sid_details % % % 1 " and ash.temp_space_allocated/power(1024,3) > 1 " -- Temp usage > 1GB
@ash_sid_details % % % 24 "and ash.user_id IN (select user_id from dba_users where temporary_tablespace ='TRAF_TITAN_TEMP')" "and ash.temp_space_allocated/power(1024,1) > 1 " -- Temp usage > 1GB
@awr_sid_sqlids % % % 7*24 % "and ash.sample_time between to_timestamp('10-JUN-16 06:00:00','DD-MON-YY HH24:MI:SS') and to_timestamp('10-JUN-16 07:00:00','DD-MON-YY HH24:MI:SS')" "" " having SUM(ash.temp_space_allocated)/power(1024,3) > 1 " -- Temp usage > 1GB
-- ASH PGA USage
@ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.pga_allocated)/power(1024,3) > 2 " -- PGA usage > 2GB
-- ASH Physical IO
@ash_sid_sqlids % % % 1 % "" "" " having (SUM(ash.delta_read_io_bytes)+SUM(ash.delta_write_io_bytes) ) / power(1024,3) > 20" -- Physical Read/Write > 20GB
@ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.delta_read_io_bytes) / power(1024,3) > 20" -- Physical Read > 20GB
@ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.delta_write_io_bytes) / power(1024,3) > 20" -- Physical Write > 20GB
@ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.delta_interconnect_io_bytes) / power(1024,3) > 20" -- Interconnect IO > 20GB
@ash_sid_details % % % 5 "and (ash.delta_read_io_bytes+ash.delta_write_io_bytes)/ power(1024,3) > 1 "
-- Physical Read > 20GB
@ash_sid_sqlids % % % 1 % "and ash.user_id = (select user_id from dba_users where username = upper('username')) " "" " having SUM(ash.delta_read_io_bytes) / power(1024,3) > 20 "
@ash_sid_sqlids % % % 1 % "" "" " having (SUM(ash.delta_read_io_requests)+SUM(ash.delta_write_io_requests) ) > 1000000" -- Physical Read/Write IOPS
@ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.delta_read_io_requests) > 1000000" -- Physical Read IOPS
@ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.delta_write_io_requests) > 1000000" -- Physical Write IOPS
-- Details of session with long running queries from ASH
@ash_sid_details % % % 5 "and (ash.sample_time - ash.sql_exec_start) > '+0 00:05:00' and ash.service_hash = (select name_hash from dba_services where name = 'pr01pimi_sp_con') "
@top_session_by_all % 20 "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "redo size" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "CPU used by this session" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "consistent gets" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "physical reads" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "physical read total bytes" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "physical read total bytes optimized" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "cell physical IO interconnect bytes" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "cell physical IO bytes saved by storage index" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' "
@top_session_by_statname % 20 "physical writes" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "physical write total bytes" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "physical write total bytes optimized" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'"
@top_session_by_statname % 20 "session pga memory" ""
@top_session_by_statname % 20 "session pga memory max" ""
@top_session_by_statname % 20 "bytes sent via SQL*Net to client" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' "
@top_session_by_statname % 20 "bytes received via SQL*Net from client" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' "
@top_session_by_statname % 20 "execute count" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' "
redo size
CPU used by this session
consistent gets
cell physical IO bytes saved by storage index
cell physical IO interconnect bytes
cell physical IO interconnect bytes returned by smart scan
physical read total bytes
physical read total bytes optimized
physical write total bytes
physical write total bytes optimized
bytes received via SQL*Net from client
bytes received via SQL*Net from dblink
bytes sent via SQL*Net to client
bytes sent via SQL*Net to dblink
@session_breakdown
@logswitchsize
@awr_loadprofile_physicalreads % 30
@awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "CPU used by this session"
@awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "physical read total bytes"
@awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "physical write total bytes"
@awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "session pga memory"
@awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "redo size"
@awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "sorts (disk)"
@awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "sorts (memory)"
@awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "sorts (rows)"
@awr_loadprofile_logicalreads % 7
@awr_loadprofile_physicalreads % 7
@awr_loadprofile_physicalwrites % 7
@awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "CPU used by this session"
@awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "physical read total bytes"
@awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "physical write total bytes"
@awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "session pga memory"
@awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "redo size"
@awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "sorts (disk)"
@awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "sorts (memory)"
@awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "sorts (rows)"
-- Last 2 hours
@awr_top_sql_by_all 2 % "" "" 10
-- Last 10 hours
@awr_top_sql_by_all 10 % "" "" 10
-- Between a specific timeframe
@awr_top_sql_by_all % % "2015-09-28 00:00:00" "2015-10-05 00:05:00" 10
@awr_top_sql_by_all % % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10
@awr_top_sql_by_weight % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30
@awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30
@awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.sql_id not in ('d7y4tdacc7f3j','5ysyjtdsjwsr2','7akf6xvjp5a6d','3s2u15uzjum48','5ysyjtdsjwsr2','4sp1tg6fv1a9p','bcv9qynmu1nv9','cm5vu20fhtnq1','7wv97t3szuvtd','ar9nmtmd28460','4m7m0t6fjcs5x','2skwhauh2cwky','12kw3xcxv1qpg','9tgj4g8y4rwy8')"
@awr_top_sql_weekly_client
@awr_top_sql_weekly_client % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30
@awr_top_sql_weekly_client % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.parsing_schema_name like 'username'"
@awr_top_sql_weekly_client % % "2015-01-27 14:00:00" "2015-01-27 19:02:00" 10 "and ss.parsing_schema_name like 'username'"
@awr_sqlstats_weekly 1xnhn6v30q1t0
@awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.sql_id in ('47604xybfj03j')"
@awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.parsing_schema_name = 'username'"
@awr_top_sql_by_all % % "2015-06-01 00:00:00" "2015-06-04 00:02:00" 10 "and ss.parsing_schema_name like 'username'"
@awr_top_sql_by_weight % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.parsing_schema_name like 'username'"
@awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.parsing_schema_name like 'FES%'"
@awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.parsing_schema_name like 'AE%'"
@awr_top_sql_by_all % % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "and ss.parsing_schema_name like 'AE_%'"
@awr_top_sql_by_cpu % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10
@awr_top_sql_by_exec % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10
@awr_top_sql_by_elapsed_time % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10
@awr_top_sql_by_physicalread % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10
@awr_top_sql_by_physicalwrite % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10
@awr_top_sql_by_buffer_gets % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10
@awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "redo size" "&&WHERECLAUSE"
@awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "elapsed_time_total" "&&WHERECLAUSE"
@awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "executions_total" "&&WHERECLAUSE"
@awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "physical_change_bytes_total" "&&WHERECLAUSE"
@awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "physical_write_bytes_total" "&&WHERECLAUSE"
@awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "physical_read_bytes_total" "&&WHERECLAUSE"
@awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "buffer_gets_total" "&&WHERECLAUSE"
@awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "db_block_changes" "&&WHERECLAUSE"
@awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "physical_reads" "&&WHERECLAUSE"
@awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "physical_writes" "&&WHERECLAUSE"
@awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "physical_reads_direct" "&&WHERECLAUSE"
@awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "physical_writes_direct" "&&WHERECLAUSE"
@awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "logical_reads" "&&WHERECLAUSE"
@awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "space_used" "&&WHERECLAUSE"
@awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "space_allocated" "&&WHERECLAUSE"
AWR Top Segment Statistics
##################################
db_block_changes
physical_reads
physical_writes
physical_reads_direct
physical_writes_direct
logical_reads
space_used
space_allocated
table_scans
itl_waits
row_locks_waits
chain_row_excess
buffer_busy_waits
gc_buffer_busy
gc_cr_blocks_served
gc_cu_blocks_served
gc_cr_blocks_received
gc_cu_blocks_received
PGA Usage Vs Temp Usage Statistics
#####################################
'workarea memory allocated' - the total amount of PGA memory dedicated to workareas allocated in Kb.
'workarea executions - optimal' - the cumulative count of work areas which had an optimal size. For example optimal size is defined if the sort does not need to spill to the disk
'workarea executions - onepass' - the cumulative count of work areas using the one pass size. One pass is generally used for big work areas where spilling to disk cannot be avoided.
'workarea executions - multipass' - the cumulative count of work areas running in more than one pass. This should be avoided and is the symptom of poorly tuned system.
@statname "%physical%"
@awr_statistics_delta % "2015-09-23 00:00:00" "2015-09-29 00:00:00" "workarea memory allocated"
@awr_statistics_delta % "2015-09-23 00:00:00" "2015-09-29 00:00:00" "workarea executions - optimal" -- In-memory (PGA) execution
@awr_statistics_delta % "2015-09-23 00:00:00" "2015-09-29 00:00:00" "workarea executions - onepass" -- Execution spilling to tempspace
@awr_statistics_delta % "2015-09-23 00:00:00" "2015-09-29 00:00:00" "workarea executions - multipass"
#Row Chaining
@awr_statistics_delta % "2015-09-28 00:00:00" "2015-09-29 00:00:00" "table fetch continued row"
@awr_statistics_delta % "2015-09-28 00:00:00" "2015-09-29 00:00:00" "table fetch by rowid"
@awr_pga_stats_delta_by_statname % "2015-09-23 00:00:00" "2015-09-23 00:00:00" "total PGA allocated"
@pga_stats -- This will give you the PGA stat name
#####################################################
aggregate PGA target parameter
aggregate PGA auto target
cache hit percentage
total PGA allocated
total PGA inuse
total freeable PGA memory
total PGA used for auto workareas
total PGA used for manual workareas
max processes count
process count
maximum PGA allocated
maximum PGA used for auto workareas
maximum PGA used for manual workareas
over allocation count
bytes processed
extra bytes read/written
PGA memory freed back to OS
recompute count (total)
global memory bound
@awr_service_stats %grid% "logons cumulative" "2013-04-04 12:00:00" "2015-03-21 01:05:00"
System Events
######################
@eventname "%log%switch%"
@awr_system_event_time "log file switch (archiving needed)" 3
@awr_system_event_time "log file switch (archiving needed)" 3
@awr_system_event_time "log file switch (checkpoint incomplete)" 3
@awr_system_event_time "log file switch completion" 3
@awr_system_event_time "user I/O wait time" 3
set tab off
set lines 190 pages 0 arraysize 63
select * from table(dbatavaria.jss.gtop(50));
########################
# RAC Commands
######################
# Check CRS Stack internal resources status.
dcli -l oracle -g ~/dbs_group 'crsctl stat res -t -init -w "((NAME != ora.drivers.acfs) AND (NAME != ora.diskmon))"' |grep -v 'ONLINE ONLINE' |egrep -B1 'INTERMEDIATE|OFFLINE' |egrep -v '^--'
# Per-node variant of the dcli check above: run the CRS "-init" resource
# status on every cluster node and surface anything not ONLINE ONLINE
# (INTERMEDIATE/OFFLINE), excluding the acfs driver and diskmon resources.
for host in `olsnodes`
do
echo "###### $host #######"
ssh $host 'crsctl stat res -t -init -w "((NAME != ora.drivers.acfs) AND (NAME != ora.diskmon))" |grep -v "ONLINE ONLINE" |egrep -B1 "INTERMEDIATE|OFFLINE" ' \
2>/dev/null |egrep -v '^--'
done
# Check CRS Resource dependency
crsctl status res -p -init |egrep '^NAME=|DEPENDE' | grep -B1 ora.gpnpd
crsctl status res -p |egrep '^NAME=|^STOP_DEPENDENCIES' | grep -B1 ora.oc4j
crsctl status res -p |egrep '^NAME=|^START_DEPENDENCIES' | grep -B1 ora.oc4j
# Walk the NAME=/ *DEPENDENCIES lines of the init resources one at a time.
crsctl status res -p -init |egrep '^NAME=|DEPENDENCIES' | while read line
do
# An empty do/done pair is a bash syntax error; keep a placeholder body.
echo "$line"  # TODO: replace with the desired per-line processing
done
########################
# Search Alert on Exadata
######################
less $ORACLE_BASE/diag/rdbms/`echo $ORACLE_SID|cut -b 1-8`/$ORACLE_SID/trace/alert_$ORACLE_SID.log
dcli -l oracle -g /opt/oracle.SupportTools/onecommand/dbs_group 'egrep -A10 "Aug 01.*2012" $ORACLE_BASE/diag/rdbms/`echo $ORACLE_SID|cut -b 1-8`/$ORACLE_SID/trace/alert_$ORACLE_SID.log |egrep "2012|ORA-00600|trc" | grep -B1 ORA- |egrep -v "^--" '
dcli -l oracle -g ~/dbs_group_pr01pimi 'egrep -A10 "Mar 31.*2014" $ORACLE_BASE/diag/rdbms/`echo $ORACLE_SID|cut -b 1-8`/$ORACLE_SID/trace/alert_$ORACLE_SID.log |egrep -v "opiodr|ORA-3136" | egrep "2014|ORA-|trc" | egrep -B1 "ORA-|trc" |egrep -v "^--" '
dcli -l oracle -g ~/dbs_group_bt01pimi 'egrep -A10 "Mar 31.*2014" $ORACLE_BASE/diag/rdbms/`echo $ORACLE_SID|cut -b 1-8`/$ORACLE_SID/trace/alert_$ORACLE_SID.log |egrep -v "opiodr|ORA-3136" | egrep "2014|ORA-|trc" | egrep -B1 "ORA-|trc" |egrep -v "^--" '
dcli -l oracle -c dm02db04,dm02db05,dm02db06 'egrep -A10 "Oct 16.*2013" $ORACLE_BASE/diag/rdbms/`echo $ORACLE_SID|cut -b 1-8`/$ORACLE_SID/trace/alert_$ORACLE_SID.log |egrep -v opiodr | egrep "2013|ORA-|trc" | egrep -B1 "ORA-|trc" |egrep -v "^--" '
dcli -l oracle -g /opt/oracle.SupportTools/onecommand/dbs_group "find /u01/app/oracle/diag/rdbms/pr01pimi/ -name '*trc' -exec grep -l deadlock {} \; "
##############################
# Search deadlock on Exadata
###############################
dcli -l oracle -g ~/dbs_group "find /u01/app/oracle/diag/rdbms/pr01pimi -name '*trc' -mtime -1 -exec grep -l deadlock {} \; "
dcli -l oracle -g ~/dbs_group "find /u01/app/oracle/diag/rdbms/pr01pimi -name '*trc' -mtime -1 -exec grep -l deadlock {} \; "
#####################################
# Search Listener Log on Exadata
#####################################
-- Verify SCAN listener start times
dcli -l oracle -g ~/dbs_group 'ps -ef|grep tns|grep SCAN|grep -v grep '
cat ~/dbs_group_pr01pimi
cat ~/dbs_group_bt01pimi
--Scan Listeners
less $ORACLE_BASE/diag/tnslsnr/`hostname -s`/listener_scan*/trace/listener_scan*.log
dcli -l oracle -g /opt/oracle.SupportTools/onecommand/dbs_group "grep 07-AUG-2012 $ORACLE_BASE/diag/tnslsnr/\`hostname -s\`/listener_scan*/trace/listener_scan*.log |egrep 'vamiot' "
DMZ - /u01/oracle/11.2.0.3/grid/log
# Grep each node's default-listener trace log for MAY-2013 entries, drop the
# service_update noise, and print timestamp plus selected connect fields.
for HOST in `olsnodes`
do
ssh $HOST "egrep 'MAY-2013' /u01/app/oracle/diag/tnslsnr/${HOST}/listener/trace/listener.log \
| egrep -v 'service_update' \
2> /dev/null
" 2> /dev/null
done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}'
# Same scan against a fixed host-group file, filtered for JUL-2014 entries
# mentioning Optimizer and the user mpilyavs.
for HOST in `cat ~/dbs_group_bt01pimi`
do
ssh $HOST "egrep 'JUL-2014' /u01/app/oracle/diag/tnslsnr/${HOST}/listener/trace/listener.log \
| egrep 'Optimizer' |egrep mpilyavs \
2> /dev/null
" 2> /dev/null
done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}'
# For each running SCAN listener (host and listener name parsed from srvctl
# output), grep its trace log for 01-OCT-2013 'sas' connections.
srvctl status scan_listener |grep running|awk '{print $3 "," $8}' |while read line
do
HOST=`echo $line|cut -d, -f2`
# Listener name must be lower-cased to match the on-disk trace path.
LISTENER=`echo $line|cut -d, -f1 | tr [:upper:] [:lower:]`
ssh $HOST "grep '01-OCT-2013' /u01/app/11.2.0.2/grid/log/diag/tnslsnr/${HOST}/${LISTENER}/trace/${LISTENER}.log \
|egrep -v 'service_update' |grep sas
"
done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}'
# Same SCAN-listener scan for 09-MAY-2013 / user dabhi, then extract the
# USER= field from each connect string and count logins per user, descending.
srvctl status scan_listener |grep running|awk '{print $3 "," $8}'|while read line
do
HOST=`echo $line|cut -d, -f2`
# Listener name must be lower-cased to match the on-disk trace path.
LISTENER=`echo $line|cut -d, -f1 | tr [:upper:] [:lower:]`
ssh $HOST "egrep '09-MAY-2013' /u01/app/11.2.0.2/grid/log/diag/tnslsnr/${HOST}/${LISTENER}/trace/${LISTENER}.log \
|egrep -v 'service_update' |grep dabhi
"
done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' | awk -F"USER=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c | sort -k1nr
#### Symphony Grid Connections
# Symphony Grid connections: grep each SCAN listener's log for yesterday's
# and today's date (upper-cased DD-MON-YYYY) plus 'sas', then bucket the
# resulting timestamps (cut to hh:mm:ss prefix) and count per bucket.
srvctl status scan_listener |grep running|awk '{print $3 "," $8}'|while read line
do
HOST=`echo $line|cut -d, -f2`
# Listener name must be lower-cased to match the on-disk trace path.
LISTENER=`echo $line|cut -d, -f1 | tr [:upper:] [:lower:]`
ssh $HOST "egrep \"`date --date=yesterday +%d-%b-%Y|tr [:lower:] [:upper:]`|`date +%d-%b-%Y|tr [:lower:] [:upper:]`\" /u01/app/11.2.0.2/grid/log/diag/tnslsnr/${HOST}/${LISTENER}/trace/${LISTENER}.log \
|grep sas
"
done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' | cut -d: -f1-3 |uniq -c
# Scan all listener_scan* logs per host for 31-JUL-2013 sas/devmtg/python
# connections and count them per hour (cut to date:hh prefix).
srvctl status scan_listener |grep running|awk '{print $3 "," $8}'|while read line
do
HOST=`echo $line|cut -d, -f2`
# Listener name must be lower-cased to match the on-disk trace path.
LISTENER=`echo $line|cut -d, -f1 | tr [:upper:] [:lower:]`
ssh $HOST "egrep -h '31-JUL-2013' /u01/app/11.2.0.2/grid/log/diag/tnslsnr/${HOST}/listener_scan*/trace/listener_scan*.log \
|grep sas | grep devmtg| grep python
"
done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' | cut -d: -f1-2 |uniq -c
# olsnodes variant of the scan-log search above: all nodes, 31-JUL-2013,
# case-insensitive 'sas', counted per hh:mm bucket.
for HOST in `olsnodes`
do
ssh $HOST "egrep -h '31-JUL-2013' /u01/app/11.2.0.2/grid/log/diag/tnslsnr/${HOST}/listener_scan*/trace/listener_scan*.log \
| egrep -v 'service_update' |egrep -i 'sas' \
2> /dev/null
" 2> /dev/null
done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' | cut -d: -f1-3 |uniq -c
24-OCT-2013 01:39:15 * (CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=servername)(CID=(PROGRAM=sqlplus)(HOST=hostname)(USER=username))) * (ADDRESS=(PROTOCOL=tcp)(HOST=144.77.86.141)(PORT=60263)) * establish * servicename * 0
# Filter for String
| awk -F"USER=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c |sort -k1nr
| awk -F"SERVICE_NAME=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c |sort -k1nr
| awk -F"PROGRAM=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c |sort -k1nr
| awk -F"HOST=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c |sort -k1nr
| awk '
# Parse a listener-log connect line: split the record on "(", then each
# piece on ")" and "=", to pull out PROGRAM, SERVICE_NAME, USER and the
# client HOST. "__jdbc__" is a placeholder host and is overwritten by a
# later HOST field if one appears.
{
date=$1;
time=$2;
program="";
service_name="";
user="";
host="";
n=split($0 , array1, "(" );
for (i=1; i<=n; i++) {
split(array1[i] ,array2 , ")" )
split(array2[1] ,array3 , "=")
field=array3[1]
value=array3[2]
if (field == "PROGRAM") { program=value}
if (field == "SERVICE_NAME") { service_name=value}
if (field == "USER") { user=value}
if (field == "HOST") { if (host=="" || host=="__jdbc__") { host=value} }
}
# Emit a normalized " * "-separated record for downstream cut/uniq filters.
print date " " time " * " service_name " * " host " * " user " * " program " * " ;
}
'
# Add following for logins/day
|awk '{print $1}' |uniq -c
# Add following for logins/hour
|cut -d: -f1 |uniq -c
# Add following for logins/10min
|cut -c1-16 |uniq -c
# Add following for logins/min
|cut -d: -f1-2 |uniq -c
# Add following for logins/sec
|cut -d" " -f1-2 |uniq -c
# Add following for logins/service_name
| awk -F"SERVICE_NAME=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c | sort -k1nr
# Add following for logins/program
| awk -F"PROGRAM=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c | sort -k1nr
# Add following for logins/clienthost
| awk -F"HOST=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c | sort -k1nr
# Add following for logins/user
| awk -F"USER=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c | sort -k1nr
-- Host Listeners
less $ORACLE_BASE/diag/tnslsnr/`hostname -s`/listener/trace/listener.log
dcli -l oracle -g /opt/oracle.SupportTools/onecommand/dbs_group "grep 1*-AUG-2012 $ORACLE_BASE/diag/tnslsnr/\`hostname -s\`/listener/trace/listener.log |egrep 'trs20'"
#####################################
# Exadata verify tnsnames.ora
#####################################
firstnode=`head -1 dbs_group`
dcli -l oracle -g dbs_group "scp $ORACLE_HOME/network/admin/tnsnames.ora $firstnode:/tmp/tnsnames.ora_\`hostname -s\`"
# Diff every other node's collected tnsnames.ora (copied to /tmp above)
# against the first node's copy to spot configuration drift.
cat dbs_group |grep -v $firstnode |while read i
do
echo "######## diff /tmp/tnsnames.ora_$firstnode /tmp/tnsnames.ora_$i #############"
diff /tmp/tnsnames.ora_$firstnode /tmp/tnsnames.ora_$i
done
##### Sync the tnsnames.ora
firstnode=`head -1 dbs_group`
cp -p /tmp/tnsnames.ora_$firstnode $ORACLE_HOME/network/admin/tnsnames.ora
dcli -l oracle -g dbs_group -d $ORACLE_HOME/network/admin/ -f $ORACLE_HOME/network/admin/tnsnames.ora
#####################################
# Kill Sessions
#####################################
-- Generate and execute ALTER SYSTEM KILL SESSION for every gv$session row
-- matching the filters below. The ',@inst_id' suffix makes the kill
-- RAC-aware (11gR2+ syntax); IMMEDIATE forces rollback without waiting.
-- Edit the commented filters before running — the defaults ('%%', '@%')
-- are placeholders.
BEGIN
FOR i in ( select 'alter system kill session ''' || sid || ',' || serial# || ',@' || inst_id || ''' immediate' sqltext
from gv$session s
where 1=1
AND s.status <> 'KILLED'
AND s.program like '%%'
-- AND s.username like '%%'
AND s.osuser like '@%'
-- AND s.sql_id = '0ggabydqkkgrf'
-- AND s.service_name = '%%'
ORDER BY s.logon_time desc
)
LOOP
begin
execute immediate i.sqltext;
exception
-- Deliberate best-effort: a session may vanish or already be marked
-- killed between the SELECT and the kill; ignore and continue the loop.
WHEN OTHERS THEN
null;
END;
END LOOP;
END;
/
select 'alter system kill session ''' || sid || ',' || serial# || ',@' || inst_id || ''';' , s.program from gv$session s where s.program like '%proteusNightCycle%';
#############################
GoldenGate (Run in BASH shell)
#############################
help stats extract
help stats replicat
# Tables ordered by operations
#echo 'STATS PPRD041 , HOURLY , REPORTRATE SEC' |./ggsci |egrep 'Replicating|Extracting|Total operations/second' | while read line ;
#echo 'STATS PPRD041 , HOURLY , REPORTRATE MIN' |./ggsci |egrep 'Replicating|Extracting|Total operations/minute' | while read line ;
#echo 'STATS PPRD041 , HOURLY , REPORTRATE HR' |./ggsci |egrep 'Replicating|Extracting|Total operations/hour' | while read line ;
#echo 'STATS PPRD041 , DAILY , REPORTRATE SEC' |./ggsci |egrep 'Replicating|Extracting|Total operations/second' | while read line ;
#echo 'STATS PPRD041 , DAILY , REPORTRATE MIN' |./ggsci |egrep 'Replicating|Extracting|Total operations/minute' | while read line ;
#echo 'STATS PPRD041 , DAILY , REPORTRATE HR' |./ggsci |egrep 'Replicating|Extracting|Total operations/hour' | while read line ;
#
echo 'STATS PPRD041 , DAILY ' |./ggsci |egrep 'Replicating|Extracting|Total operations' | while read line ;
#echo 'STATS PPRD041 , LATEST ' |./ggsci |egrep 'Replicating|Extracting|Total operations' | while read line ;
#echo 'STATS PPRD041 , HOURLY ' |./ggsci |egrep 'Replicating|Extracting|Total operations' | while read line ;
do
# Extracting/Replicating lines start a new output row with the group name;
# "since" lines append context; "Total operations" lines append the count.
if [[ "$line" == *Extracting* ]] || [[ "$line" == *Replicating* ]]
then
printf "\n`echo $line|awk '{print $5}'`" ;
elif [[ "$line" == *since* ]]
then
printf " $line" ;
# Was "else [[ ... ]]": after else the test ran as a discarded command and
# every remaining line fell through to the printf. elif makes it a branch.
elif [[ "$line" == *operations* ]]
then
printf "\t\t `echo $line|awk '{print $3}'`" ;
fi
done |sort -k2nr 2>/dev/null |grep -v '^$' |head -20 ; echo "";
INFO PPRD041 detail
INFO PPRD041 SHOWCH
send PPRD041, showtrans count 5 TABULAR
STATS PPRD041 , LATEST REPORTRATE SEC
STATS PPRD041 , LATEST REPORTRATE MIN
STATS PPRD041 , LATEST REPORTRATE HR
STATS PPRD041 , LATEST
STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST, REPORTRATE HR
STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST, TABLE table
STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST, TABLE dbo.trade_block , REPORTRATE HR
STATS PPRD041 , TOTALSONLY *
STATS PPRD041 , LATEST *
STATS PPRD041 , TOTALSONLY dbo.pos_pr_daily_temp
STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST, TABLE dbdbo.swap_trade
STATS PPRD041 , TABLE dbo.swap_trade, TOTALSONLY *
STATS PPRD041 , TOTAL , TABLE dbo.pos_pr_daily_temp
STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST , TABLE dbo.pos_pr_daily_temp REPORTRATE HR
STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST , TABLE dbo.pos_pr_daily_temp REPORTRATE MIN
STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST , TABLE dbo.pos_pr_daily_temp REPORTRATE SEC
STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST, TOTALSONLY FUNDSTATION_OWN.* , NOREPORTDETAIL , REPORTRATE HR
STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST , TABLE FUNDSTATION_OWN.RATE , REPORTRATE HR
STATS PPRD041 table dbo.af_detail_trans
STATS PPRD041 table dbo.af_detail_trans_split
STATS PPRD041 , TOTALSONLY *
dbo.acct_position
dbo.acct_position_by_sector
View Trail file contents
################################
#Count the records every n minutes interval.
logdump <<EOF
NOTIFY 100000
open am000718
count
#count detail
#count FILE <tablename> detail
EOF
logdump <<EOF
#count log am00070*
count , START 2015-07-09 02:00:00 , END 2015-07-09 02:10:00 , LOG am*
EOF
logdump <<EOF
open am000714
count , START 2015-07-09 02:00:00 , END 2015-07-09 02:01:00
#count , START 2015-07-09 02:00:00 , END 2015-07-09 02:01:00 , DETAIL
EOF
logdump <<EOF
open am000714
DETAIL ON
#count , START 2015-07-09 02:00:00 , END 2015-07-09 02:01:00
count , START 2015-07-09 02:00:00 , END 2015-07-09 02:01:00 , LOG am00071*
#count , START 2015-07-09 02:00:00 , END 2015-07-09 02:01:00 , DETAIL
EOF
logdump <<EOF
NOTIFY 100000
open am000718
count interval 60 DETAIL
EOF
#FILTER ANSINAME dbo.af_detail_trans_split
logdump <<EOF
open am000714
FILTER INCLUDE STARTTIME 2015-07-09 02:00:00
FILTER INCLUDE ENDTIME 2015-07-09 02:10:00
count interval 1
EOF
for ((i=701;i<=720;i++))
do
logdump <<EOF
open am000$i
TRANSHIST 200
TRANSRECLIMIT 200000 records
TRANSBYTELIMIT 100000000 bytes
count
EOF
done
$ logdump
fileheader detail
ghdr on
ggstoken detail
fileheader detail
ghdr on
detail DATA
usertoken detail
ggstoken detail
headertoken detail
ASCIIHEADER on
ASCIIDUMP on
ASCIIDATA on
RECLEN 40000
env
open <tailfile>
position 22974377
position forward
next
or
position reverse
next
#### Go reverse from an RBA to end of transaction and then forward
position 8355707
detail off
position reverse
SCANFORENDTRANS
position forward
next
info REP_AM05
stop REP_AM05
info REP_AM05
alter REP_AM05 extseqno 359786 extrba 0
start REP_AM05
info REP_AM05
connect target sys/*****@TNS
connect AUXILIARY /
run
{
CONFIGURE DEVICE TYPE DISK PARALLELISM 24;
SET NEWNAME FOR DATABASE to '+RECO_DM02_EXT';
DUPLICATE TARGET DATABASE TO BT01PIMI
until SCN 503299730741
OPEN RESTRICTED
;
}
connect target sys/*****@TNS
connect AUXILIARY /
run
{
SET NEWNAME FOR DATABASE TO '+DATA_DM51';
DUPLICATE TARGET DATABASE
FOR STANDBY
FROM ACTIVE DATABASE
DORECOVER
SPFILE
SET "db_unique_name"="db_unique_name"
SET FAL_SERVER="akslfks"
SET LOG_ARCHIVE_DEST_2="SERVICE=pr01hsti ASYNC VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) compression=enable"
SET DB_FILE_NAME_CONVERT=("+DATA/","+RECO_DM51/","+RECO/","+RECO_DM51/")
SET LOG_FILE_NAME_CONVERT=("+DATA/","+RECO_DM51/","+RECO/","+RECO_DM51/")
set standby_file_management='AUTO'
;
}
create spfile='+DATA_DM51/dbname/spfiledbname.ora' from pfile='/u01/app/oracle/product/11.2.0.2/dbhome_1/dbs/initdbname.ora';
allocate channel for maintenance device type disk;
delete noprompt archivelog all completed before 'sysdate - 1' ;
--**************************************************
-- Auditing Queries
--**************************************************
SELECT * FROM gv$system_parameter where name like '%audit%' order by name, inst_id;
SELECT * FROM DBA_STMT_AUDIT_OPTS;
SELECT * FROM DBA_PRIV_AUDIT_OPTS;
SELECT * FROM DBA_OBJ_AUDIT_OPTS;
SELECT * FROM STMT_AUDIT_OPTION_MAP;
SELECT * FROM ALL_DEF_AUDIT_OPTS;
SELECT * FROM DBA_AUDIT_EXISTS;
SELECT * FROM DBA_FGA_AUDIT_TRAIL;
SELECT * FROM DBA_COMMON_AUDIT_TRAIL;
SELECT * FROM DBA_AUDIT_STATEMENT;
SELECT * FROM DBA_AUDIT_SESSION;
SELECT * FROM DBA_AUDIT_OBJECT;
SELECT * FROM DBA_AUDIT_POLICIES;
SELECT * FROM DBA_AUDIT_POLICY_COLUMNS;
AUDIT SESSION WHENEVER NOT SUCCESSFUL;
AUDIT SESSION BY <username>;
NOAUDIT SESSION;
NOAUDIT SESSION BY <username>;
AUDIT DELETE ON <table_name> BY SESSION WHENEVER SUCCESSFUL ;
AUDIT DELETE ON <table_name> BY ACCESS WHENEVER SUCCESSFUL ;
AUDIT DELETE ON <table_name> BY SESSION WHENEVER NOT SUCCESSFUL ;
AUDIT DELETE ON <table_name> BY ACCESS WHENEVER NOT SUCCESSFUL ;
AUDIT DELETE ON <table_name> BY SESSION ; -- regardless of success or failure
AUDIT DELETE ON <table_name> BY ACCESS ; -- regardless of success or failure
#######################################
##### Dropping temp segments
#######################################
SQL> select ts#, name from sys.ts$ where name like 'TEMP%';
alter session set events 'immediate trace name drop_segments level <TS number + 1>';
alter session set events 'immediate trace name drop_segments level 5';
alter session set events 'immediate trace name drop_segments level 101';
alter session set events 'immediate trace name drop_segments level 115';
alter session set events 'immediate trace name drop_segments level 99';
alter session set events 'immediate trace name drop_segments level 118';
alter session set events 'immediate trace name drop_segments level 100';
#######################################
##### OLAP Cubes
#######################################
##### Workaround to fix the OLAP repository issue with a particular CUBE
##### (with suitable replacement of my_aw).
begin
dbms_aw.execute(q'!
aw attach my_aw RW
_repos clear
update
commit
aw detach my_aw
aw attach my_aw RW
allcompile
update
commit
aw detach my_aw
!');
end;
/
-- Clear and re-calculate the Cube Aggregations
BEGIN
DBMS_CUBE.BUILD(q'!
CRM_DW_DMZ_OWN.TRANSACTION_CUBE USING
(
FOR CRM_DW_DMZ_OWN.ALLOCATION_DATE_DIM
WHERE CRM_DW_DMZ_OWN.ALLOCATION_DATE_DIM.DIM_KEY IS DESCENDANT OR SELF OF 'CM Apr 2013'
WITHIN CRM_DW_DMZ_OWN.ALLOCATION_DATE_DIM.CALENDAR_HIERARCHY
BUILD(CLEAR AGGREGATES, SOLVE)
)
!'
, add_dimensions=>false
);
END;
/
##### Cube build Logging.
#####
-- Create Logging Tables
BEGIN
  -- Create the default logging table for each CUBE log type.
  -- BUG FIX: the TYPE_DIMENSION_COMPILE and TYPE_OPERATIONS calls previously had
  -- their tblname arguments swapped, so each log type was created under the
  -- other type's default table name. Each log_type now pairs with
  -- default_name() of the same type.
  SYS.DBMS_CUBE_LOG.TABLE_CREATE(log_type => sys.dbms_cube_log.type_build             , tblname => dbms_cube_log.default_name(dbms_cube_log.type_build) );
  SYS.DBMS_CUBE_LOG.TABLE_CREATE(log_type => sys.dbms_cube_log.type_dimension_compile , tblname => dbms_cube_log.default_name(dbms_cube_log.type_dimension_compile) );
  SYS.DBMS_CUBE_LOG.TABLE_CREATE(log_type => sys.dbms_cube_log.type_operations        , tblname => dbms_cube_log.default_name(dbms_cube_log.type_operations) );
  SYS.DBMS_CUBE_LOG.TABLE_CREATE(log_type => sys.dbms_cube_log.type_rejected_records  , tblname => dbms_cube_log.default_name(dbms_cube_log.type_rejected_records) );
END;
/
-- Query Default logging table names
SELECT dbms_cube_log.default_name(dbms_cube_log.type_build) "Build"
, dbms_cube_log.default_name(dbms_cube_log.type_operations) "Operations"
, dbms_cube_log.default_name(dbms_cube_log.type_dimension_compile) "DimensionCompile"
, dbms_cube_log.default_name(dbms_cube_log.type_rejected_records) "RejectedRecords"
FROM dual;
-- Enable CUBE Build Logging (*** in current session ***)
/* When you pass first parameter (log_type) as NULL it set for dimension_compile, rejected_records,operations log_types. */
/* So if you passed log_type = NULL and log_location = CUBE_BUILD_LOG, it will try to log non-build records mentioned above
as to this table.
*/
BEGIN
dbms_cube_log.enable(DBMS_CUBE_LOG.TYPE_OPERATIONS ,DBMS_CUBE_LOG.TARGET_TABLE,DBMS_CUBE_LOG.LEVEL_MEDIUM);
dbms_cube_log.enable(DBMS_CUBE_LOG.TYPE_REJECTED_RECORDS ,DBMS_CUBE_LOG.TARGET_TABLE,DBMS_CUBE_LOG.LEVEL_MEDIUM);
dbms_cube_log.enable(DBMS_CUBE_LOG.TYPE_DIMENSION_COMPILE ,DBMS_CUBE_LOG.TARGET_TABLE,DBMS_CUBE_LOG.LEVEL_MEDIUM);
dbms_cube_log.set_parameter(DBMS_CUBE_LOG.TYPE_REJECTED_RECORDS ,1,100); -- Set Max Errors to log
dbms_cube_log.set_parameter(DBMS_CUBE_LOG.TYPE_DIMENSION_COMPILE ,1,100); -- Set Max Errors to log
END ;
/
-- Cube build
BEGIN
DBMS_CUBE.BUILD(
method => 'CCCCCCCCCCCC'
, refresh_after_errors => false
, parallelism => 8
, atomic_refresh => false
, automatic_order => false
, add_dimensions => false
, scheduler_job => 'OLAP$_' || CRM_DW_DMZ_OWN.CUBE_BUILD_ID.nextval
, master_build_id => CRM_DW_DMZ_OWN.CUBE_BUILD_ID.nextval
, nested => false
, script => '
CRM_DW_DMZ_OWN.CONTACT_PARTNERSHIPS_DIM USING
(
LOAD NO SYNCH,
COMPILE SORT
),
CRM_DW_DMZ_OWN.TRANSACTION_CUBE'
);
END;
/
-- Check Progress
ALTER SESSION SET CURRENT_SCHEMA = CRM_DW_DMZ_OWN;
SELECT * FROM dba_sequences WHERE sequence_owner = SYS_CONTEXT('USERENV','CURRENT_SCHEMA');
SELECT * FROM dba_scheduler_running_jobs;
SELECT * FROM CRM_DW_DMZ_OWN.cube_build_log;
SELECT * FROM CRM_DW_DMZ_OWN.cube_dimension_compile ;
SELECT * FROM CRM_DW_DMZ_OWN.cube_operations_log;
SELECT * FROM CRM_DW_DMZ_OWN.cube_rejected_records;
SELECT TIME, build_id, slave_number, command, status, build_object, build_object_Type ,partition, output
FROM cube_build_log
where build_id = 46
order by time desc;
SELECT build_id
, slave_number
, build_object
, build_object_type
, partition
, command
, NVL(MAX(DECODE(status , 'COMPLETED',NVL(TIME,SYSDATE), NULL)),systimestamp) - MIN(DECODE(status , 'STARTED',TIME, NULL)) duration
, MIN(DECODE(status , 'STARTED',TIME, NULL)) started
, MAX(DECODE(status , 'COMPLETED',TIME, NULL)) completed
FROM cube_build_log
WHERE build_id = 46
GROUP BY build_id , slave_number , build_object , build_object_type , partition , command
ORDer by started desc
;
SELECT build_id
, slave_number
, build_object
, build_object_type
, partition
, command
, NVL(MAX(DECODE(status , 'COMPLETED',NVL(TIME,SYSDATE), NULL)),systimestamp) - MIN(DECODE(status , 'STARTED',TIME, NULL)) duration
, MIN(DECODE(status , 'STARTED',TIME, NULL)) started
, MAX(DECODE(status , 'COMPLETED',TIME, NULL)) completed
FROM cube_build_log
WHERE build_id = 46
AND command = 'BUILD'
GROUP BY build_id , slave_number , build_object , build_object_type , PARTITION , command
HAVING MAX(DECODE(status , 'COMPLETED',TIME, NULL)) IS NOT NULL
AND partition IS NOT NULL
ORDer by started desc
;
-- Latest not-yet-COMPLETED step per partition of a running cube build,
-- joined to the scheduler job's database session for live wait/event info.
-- FIX: s.status appeared twice in the SELECT list; the duplicate is removed.
SELECT a.build_id, a.slave_number, s.status, a.command, a.time, a.partition, a.build_object, a.build_object_type, s.event
FROM cube_build_log a
JOIN DBA_SCHEDULER_RUNNING_JOBS j ON j.owner = a.owner AND a.scheduler_job = j.job_name
JOIN gv$session s ON s.inst_id = j.running_instance AND j.session_id = s.SID
WHERE 1=1
-- keep only the most recent log row for each (build_id, partition)
AND A.TIME = (SELECT MAX(TIME) FROM cube_build_log c WHERE C.build_id = A.build_id AND A.PARTITION = c.PARTITION GROUP BY c.build_id, c.PARTITION)
-- exclude steps that have already logged a COMPLETED row for the same command
AND NOT EXISTS (SELECT 1 FROM cube_build_log b WHERE b.build_id = A.build_id AND A.PARTITION = b.PARTITION AND A.command = b.command
AND b.status = 'COMPLETED')
AND A.build_id = 46
ORDER BY a.TIME DESC
;
#######################################
##### Analytic Workspace (AW , AWS)
#######################################
select * from dba_aws where owner = 'CRM_DW_OWN';
select * from dba_aw_obj where owner = 'CRM_DW_OWN' and aw_name = 'MSCRM_DMART';
select * from dba_aw_prop where owner = 'CRM_DW_OWN';
select * from v$aw_session_info;
select * from v$aw_olap;
select * from v$aw_longops;
select * from v$aw_aggregate_op;
select * from v$aw_allocate_op;
oradebug setmypid
oradebug tracefile_name
oradebug close_trace
oradebug ulimit
MOS Note ID [742375.1] - How to Recreate an AW Through Export and Import Commands
dbms_aw.execute
Syntax
EXPORT export_item TO EIF FILE file-name [LIST] [NOPROP] -
[NOREWRITE|REWRITE] [FILESIZE n [K, M, or G]] -
[NOTEMPDATA] [NLS_CHARSET charset-exp] [AGGREGATE | NOAGGR] -
[API | NOAPI]
where
AGGREGATE - Export aggregated data. (Default behavior.)
NOAGGR - Do not export aggregated data.
-- Exporting AW Metadata and Data to an EIF file
-- The keyword 'API' makes sure that all metadata is exported with the EIF.
-- Such an EIF file can be imported again with the same option.
-- The keyword 'NOAPI' prevents the export of the metadata and also the creation of such metadata during import.
-- NOAPI also the default setting, so make sure you make the right choice during the export. Also note that there is no way
-- to re-create the metadata for an EIF file exported with the 'NOAPI' setting.
exec dbms_aw.execute('export all to eif file ''DIRECTORY_OBJECT/export.eif'' api');
-- Importing AW Metadata and Data to an EIF file
exec dbms_aw.execute('aw create AWNAME');
exec dbms_aw.execute('aw attach AWNAME rw');
exec dbms_aw.execute('import all from eif file ''my_dir/export_file_name.eif'' ');
exec dbms_aw.execute('update');
commit;
exec dbms_aw.execute('aw detach AWNAME');
-- Exporting AW Metadata to an XML file
exec dbms_aw.execute('aw attach <owner>.<workspace_name> rw');
exec dbms_cube.export_xml_to_file( object_ids => '<owner>.<workspace_name>.AW', output_dirname => 'DATA_DUMP_DIR2', output_filename => 'AW_Metadata.xml');
exec dbms_aw.execute('aw detach mscrm_dmart');
-- Exporting AW Metadata to an XML file (Advanced)
set serveroutput on
DECLARE
optionsClob CLOB;
fh utl_file.file_type;
buffer VARCHAR2(4000);
amount INTEGER := 4000;
BEGIN
dbms_lob.createtemporary(optionsClob, false, dbms_lob.call);
dbms_cube.create_export_options(out_options_xml => optionsClob
, target_version => '11.2.0.2' -- Default NULL
, suppress_owner => TRUE -- Default FALSE
, suppress_namespace => TRUE -- Default FALSE
, preserve_table_owners => TRUE -- Default FALSE
, metadata_changes => NULL -- Default NULL
);
fh := utl_file.fopen( location => 'DATA_DUMP_DIR2'
, filename => 'AW_export_options.xml'
, open_mode => 'w'
, max_linesize => '32767'
);
dbms_lob.read(optionsClob, amount, 1,buffer);
utl_file.put_line ( fh, buffer );
utl_file.fclose ( fh);
dbms_output.put_line(optionsClob);
END;
/
exec dbms_aw.execute('aw attach <owner>.<workspace_name> ro');
exec dbms_cube.export_xml_to_file( object_ids => '<owner>.<workspace_name>.AW'
, options_dirname => 'DATA_DUMP_DIR2'
, options_filename => 'AW_export_options.xml'
, output_dirname => 'DATA_DUMP_DIR2'
, output_filename => 'AW_Metadata.xml'
);
exec dbms_aw.execute('aw detach mscrm_dmart');
-- Importing AW Metadata from an XML file
exec dbms_aw.execute('aw delete <owner>.<workspace_name>');
exec dbms_aw.execute('aw create <owner>.<workspace_name>');
exec dbms_aw.execute('aw attach <owner>.<workspace_name> rw');
exec dbms_cube.import_xml( dirname=>'DATA_DUMP_DIR2', filename =>'AW_Metadata.xml');
exec dbms_aw.execute('aw detach mscrm_dmart');
#######################################
##### Proxy Connect Syntax
#######################################
alter user pm_own grant connect through vgupta;
connect vgupta[PROXYUSER]/password@SID
##########################
DiskGroup-TableSpace Size
##########################
select tablespace_name
, SUBSTR(file_name,1, INSTR(file_name,'/') - 1) DiskGroup
, count(1) File_count
, ROUND(sum(bytes)/1024/1024/1024) size_GB
from dba_data_files
where tablespace_name = 'PM_DATA_BIG'
group by tablespace_name
, SUBSTR(file_name,1, INSTR(file_name,'/') - 1)
ORDER BY 1,2
;
select t.name
, SUBSTR(f.name,1, INSTR(f.name,'/') - 1) DiskGroup
, count(1) File_count
, ROUND(sum(f.bytes)/1024/1024/1024) size_GB
from v$datafile f, v$tablespace t
where f.ts# = t.ts#
and t.name = 'PM_DATA_BIG'
group by t.name
, SUBSTR(f.name,1, INSTR(f.name,'/') - 1)
ORDER BY 1,2
;
--*************************************
-- DBMS Scheduler Job Queries
--*************************************
exec dbms_scheduler.stop_job(job_name => 'ADV_OWN.ADV_DEAL_COLLAT_INTEX_VW_JOB' ,force => TRUE);
exec dbms_scheduler.run_job (job_name => 'ADV_OWN.ADV_DEAL_COLLAT_INTEX_VW_JOB' ,use_current_session => FALSE);
-- Scheduler Jobs
SELECT * FROM dba_scheduler_jobs
WHERE 1=1
AND owner LIKE '%%'
AND JOB_NAME LIKE '%%'
;
-- Scheduler Jobs Run Details
SELECT * FROM dba_scheduler_job_run_details
WHERE 1=1
AND owner LIKE '%%'
AND JOB_NAME like '%%'
ORDER BY log_date DESC
;
--
SELECT * FROM DBA_SCHEDULER_RUNNING_JOBS ;
SELECT * FROM DBA_SCHEDULER_JOBS ;
SELECT * FROM DBA_SCHEDULER_JOB_RUN_DETAILS ;
SELECT * FROM DBA_SCHEDULER_JOB_LOG ;
--
SELECT * FROM DBA_SCHEDULER_GLOBAL_ATTRIBUTE ;
--
SELECT * FROM DBA_SCHEDULER_GROUPS ;
SELECT * FROM DBA_SCHEDULER_GROUP_MEMBERS ;
SELECT * FROM DBA_SCHEDULER_JOB_CLASSES ;
--
SELECT * FROM DBA_SCHEDULER_SCHEDULES ;
SELECT * FROM DBA_SCHEDULER_WINDOW_GROUPS ;
SELECT * FROM DBA_SCHEDULER_WINGROUP_MEMBERS ;
SELECT * FROM DBA_SCHEDULER_WINDOWS ;
SELECT * FROM DBA_SCHEDULER_WINDOW_DETAILS ;
SELECT * FROM DBA_SCHEDULER_WINDOW_LOG ;
--
SELECT * FROM DBA_SCHEDULER_PROGRAMS ;
SELECT * FROM DBA_SCHEDULER_PROGRAM_ARGS ;
--
SELECT * FROM DBA_SCHEDULER_JOB_ROLES ;
SELECT * FROM DBA_SCHEDULER_JOB_ARGS ;
SELECT * FROM DBA_SCHEDULER_JOB_DESTS ;
SELECT * FROM DBA_SCHEDULER_NOTIFICATIONS ;
SELECT * FROM DBA_SCHEDULER_REMOTE_DATABASES ;
SELECT * FROM DBA_SCHEDULER_REMOTE_JOBSTATE ;
SELECT * FROM DBA_SCHEDULER_RUNNING_CHAINS ;
--
SELECT * FROM DBA_SCHEDULER_CHAINS ;
SELECT * FROM DBA_SCHEDULER_CHAIN_RULES ;
SELECT * FROM DBA_SCHEDULER_CHAIN_STEPS ;
SELECT * FROM DBA_SCHEDULER_CREDENTIALS ;
SELECT * FROM DBA_SCHEDULER_DB_DESTS ;
SELECT * FROM DBA_SCHEDULER_DESTS ;
SELECT * FROM DBA_SCHEDULER_EXTERNAL_DESTS ;
SELECT * FROM DBA_SCHEDULER_FILE_WATCHERS ;
--******************************************************************************
-- DataGuard Rollforward using Incremental Backups
-- http://docs.oracle.com/cd/E11882_01/server.112/e41134/rman.htm#SBYDB00759
--******************************************************************************
--------------------------------
-- Datafile Queries
--------------------------------
SQL>
SELECT TO_CHAR(TRUNC(CHECKPOINT_TIME,'HH24'),'DD-MON-YYYY HH24') "CHECKPOINT_TIME (Hour)", count(1) from v$datafile_header group by TRUNC(CHECKPOINT_TIME,'HH24') order by 1 asc;
-- On Standby database
SQL>
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
set numf 99999999999999999
SELECT CURRENT_SCN FROM V$DATABASE;
-- On a standby that has widespread nologging changes, query the V$DATAFILE view to record the lowest FIRST_NONLOGGED_SCN
SELECT MIN(FIRST_NONLOGGED_SCN) FROM V$DATAFILE WHERE FIRST_NONLOGGED_SCN>0 ;
-- On a standby that has nologging changes on a subset of datafiles, query the V$DATAFILE view, as follows:
SELECT FILE#, FIRST_NONLOGGED_SCN FROM V$DATAFILE WHERE FIRST_NONLOGGED_SCN > 0;
-- On Primary database
RMAN>
CONFIGURE DEVICE TYPE DISK PARALLELISM 16;
BACKUP INCREMENTAL FROM SCN 685401030526 DATABASE FORMAT '/export/ora_stage/vishal/incremental_backup/ForStandby_%U' tag 'FORSTANDBY2' FILESPERSET 10 MAXSETSIZE 32G;
-- or If the standby has nologging changes on a subset of datafiles,
-- then create an incremental backup for each datafile listed in the FIRST_NONLOGGED_SCN column (recorded in step 2), as follows:
RMAN>
BACKUP INCREMENTAL FROM SCN 682842365996 DATAFILE 4 FORMAT '/tmp/ForStandby_%U' TAG 'FORSTANDBY2';
BACKUP INCREMENTAL FROM SCN 682842365996 DATAFILE 5 FORMAT '/tmp/ForStandby_%U' TAG 'FORSTANDBY2';
-- On Standby database
-- Transfer backups if not available via NFS mount point
RMAN>
CATALOG START WITH '/export/ora_stage/vishal/incremental_backup/ForStandby' NOPROMPT;
STARTUP NOMOUNT;
-- Controlfile restore is only needed if there is archivelog gap.
-- RMAN> RESTORE STANDBY CONTROLFILE FROM TAG 'FORSTANDBY'; --
-- catalog all datafile
-- Switch datafiles
RMAN>
ALTER DATABASE MOUNT;
RECOVER DATABASE NOREDO;
-- Check nologging operations
SELECT MIN(FIRST_NONLOGGED_SCN) FROM V$DATAFILE WHERE FIRST_NONLOGGED_SCN>0 ;
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT FROM SESSION;
-- On Standby database
RMAN>
crosscheck backupset TAG 'FORSTANDBY2';
delete backupset TAG 'FORSTANDBY2';
--***************************************************
--*************************************
-- DataGuard Corruption Recovery
-- Resolving ORA-752 or ORA-600 [3020] During Standby Recovery (Doc ID 1265884.1)
--*************************************
/*
Errors in file /u01/app/oracle/diag/rdbms/pr01zfs/pr01zfs1/trace/pr01zfs1_mrp0_105546.trc (incident=416587):
ORA-00600: internal error code, arguments: [3020], [2238], [98776], [805405144], [], [], [], [], [], [], [], []
ORA-10567: Redo is inconsistent with data block (file# 2238, block# 98776, file offset is 809172992 bytes)
ORA-10564: tablespace PM_IDX
ORA-01110: data file 2238: '/zfssa/stby/backup2/pr01pimi/datafile/PR01ZFS/datafile/o1_mf_pm_idx__ec3466ce_.dbf'
ORA-10561: block type 'TRANSACTION MANAGED INDEX BLOCK', data object# 5030566
Incident details in: /u01/app/oracle/diag/rdbms/pr01zfs/pr01zfs1/incident/incdir_416587/pr01zfs1_mrp0_105546_i416587.trc
*/
/* If the command below takes too long to cancel MRP, then kill the PR00 parallel slave recovery process using an ALTER SYSTEM command at the database level. Avoid kill -9 */
alter database recover managed standby database cancel immediate;
-- By default following will start serial recovery
alter database recover automatic standby database TEST allow 99 corruption;
-- alter database recover automatic standby database until cancel allow 10 corruption ;
-->> Wait until you see one of the following messages in the alert log
-->> CORRUPTING BLOCK 1134925 OF FILE 48 AND CONTINUING RECOVERY
-->> ORA-01578: ORACLE data block corrupted (file # 48, block # 1134925)
-->> Now CTRL-C to cancel recovery
-- alter database recover managed standby database disconnect ;
alter database recover cancel;
-- Restore problematic datafiles
alter database recover managed standby database using current logfile disconnect ;
alter database recover managed standby database cancel immediate;
alter database open read only;
-->> Copy the corrupted datafile from primary or another standby.
RMAN>
run
{
allocate channel c1 device type disk;
allocate channel c2 device type disk;
allocate channel c3 device type disk;
allocate channel c4 device type disk;
allocate channel c5 device type disk;
sql 'alter database recover managed standby database cancel';
catalog start with '/export/ora_stage/rman_backup/data_D-PR01PIMI_I-2013469077_TS-PM_DATA_BIG_FNO-1514_g2nui9rc.dbf' noprompt;
catalog start with '/export/ora_stage/rman_backup/data_D-PR01PIMI_I-2013469077_TS-PM_DATA_BIG_FNO-1666_g3nuib6g.dbf' noprompt;
restore datafile 1514,1666 ;
change datafilecopy '/export/ora_stage/rman_backup/data_D-PR01PIMI_I-2013469077_TS-PM_DATA_BIG_FNO-1514_g2nui9rc.dbf' uncatalog;
change datafilecopy '/export/ora_stage/rman_backup/data_D-PR01PIMI_I-2013469077_TS-PM_DATA_BIG_FNO-1666_g3nuib6g.dbf' uncatalog;
sql 'alter database recover managed standby database using current logfile disconnect ';
}
run{
switch datafile 2201 to datafilecopy '/zfssa/stby/backup2/pr01pimi/datafile/PR01ZFS/datafile/o1_mf_pm_idx__f92b4506_2.dbf';
}
--*************************************
-- DataGuard Corruption File recovery
--*************************************
RMAN>
# FIX: "restore file" is not valid RMAN syntax; the command is RESTORE DATAFILE <file#>.
restore datafile 1234;
# FIX: typo "alter databaser" -> "alter database" in the SQL passed to the standby.
sql 'alter database recover managed standby database using current logfile disconnect';
--****************************************
-- Transient Type queries
--****************************************
select TO_CHAR(CTIME,'YYYY-MM-DD HH24') ctime
, count(1)
from sys.obj$ o, sys.type$ t, dba_users u
where o.oid$ = t.tvoid and
u.user_id = o.owner# and
bitand(t.properties,8388608) = 8388608
and (sysdate-o.ctime) > 0.0007
GROUP BY TO_CHAR(CTIME,'YYYY-MM-DD HH24')
ORDER BY 1
;
select count(1)
, TO_CHAR(min(CTIME),'DD-MON-YY HH24:MI:SS') min_ctime
, TO_CHAR(max(ctime),'DD-MON-YY HH24:MI:SS') max_ctime
from sys.obj$ o, sys.type$ t, dba_users u
where o.oid$ = t.tvoid and
u.user_id = o.owner# and
bitand(t.properties,8388608) = 8388608
and (sysdate-o.ctime) > 0.0007
;
--****************************************
-- How to diagnose following wait events
--****************************************
--****************************************
-- 'cursor: pin S wait on X'
-- Doc ID 1298015.1 - WAITEVENT: "cursor: pin S wait on X" Reference Note
-- Doc ID 1349387.1 - Troubleshooting 'cursor: pin S wait on X' waits.
--****************************************
-- SQL causing the issue (P1)
SELECT sql_id, sql_text, version_count
FROM GV$SQLAREA
WHERE HASH_VALUE in (select p1 from gv$session where event = 'cursor: pin S wait on X');
-- SID Holding Mutex (P2)
SELECT inst_id
, sid
, decode(trunc(P2/4294967296),
0,trunc(P2/65536),
trunc(P2/4294967296)) SID_HOLDING_MUTEX
, status
, username , osuser , machine , program
, module, action
, event
FROM gv$session
WHERE event = 'cursor: pin S wait on X' ;
-- Oracle Code Location (P3)
SELECT s.sid
, m.inst_id
, m.MUTEX_TYPE
, m.LOCATION
, m.wait_time
, m.sleeps
FROM sys.x$mutex_sleep m
, gv$session s
WHERE s.event = 'cursor: pin S wait on X'
AND m.inst_id = s.inst_id
AND m.mutex_type like 'Cursor Pin%'
and m.location_id= decode(trunc(s.P3/4294967296),
0,trunc(s.P3/65536),
trunc(s.P3/4294967296)
)
;
--****************************************
-- 'library cache lock'
-- MOS Note Id 122793.1 - How to Find which Session is Holding a Particular Library Cache Lock
-- Troubleshooting Library Cache: Lock, Pin and Load Lock (Doc ID 444560.1)
--****************************************
select inst_id,sid,saddr, status, username, osuser,machine , program
-- , BLOCKING_INSTANCE, BLOCKING_SESSION, BLOCKING_SESSION_STATUS
, FINAL_BLOCKING_INSTANCE , FINAL_BLOCKING_SESSION, FINAL_BLOCKING_SESSION_STATUS
, event from gv$session where event= 'library cache lock';
select kgllkhdl Handle,kgllkreq Request, kglnaobj Object
from sys.x$kgllk where kgllkses in (select saddr from gv$session where event= 'library cache lock')
and kgllkreq > 0;
--****************************************
-- 'library cache pin'
-- MOS Note Id 780514.1 - How to Find the Blocker of the 'library cache pin' in a RAC environment?
-- WAITEVENT: "library cache pin" Reference Note [ID 34579.1]
-- Troubleshooting Library Cache: Lock, Pin and Load Lock (Doc ID 444560.1)
--****************************************
--****************************************
-- Shrink Datafiles
--****************************************
-- Generate "ALTER INDEX ... REBUILD ... ONLINE PARALLEL 10" statements for every
-- segment that owns extents in datafile 1909, ordered so the segments sitting
-- highest in the file (largest block_id) come first -- the order needed before
-- shrinking/resizing the datafile.
-- NOTE(review): dba_extents is not filtered by segment_type here, so table and
-- other non-index segments in this file will also produce (invalid) ALTER INDEX
-- statements; consider adding segment_type IN ('INDEX','INDEX PARTITION') -- confirm.
select 'alter index ' || owner || '.' || segment_name || ' rebuild ' || nvl2(partition_name,' partition ' || partition_name ,' ') || ' online parallel 10;' , block_id, blocks
from
(select owner, segment_name, partition_name, max(block_id) block_id, max(blocks) blocks from dba_extents where file_id = 1909 group by owner, segment_name, partition_name
)
order by block_id desc
;
set lines 200
set pages 400
UNDEFINE tablespace_name
DEFINE tablespace_name
select /*+ CHOOSE */ 'alter database datafile ''' || file_name || ''' resize ' || TO_CHAR(CEIL(HWM)+50) || 'M; ---- Freeable ' || freeable || 'MB'
from
(
Select /*+ CHOOSE */ t.tablespace_name
, d.status "Status"
, GREATEST(d.maxbytes,d.bytes)/1024/1024 maxsize
, d.bytes/1024/1024 alloc
, NVL((hwm.bytes)/1024/1024,0) HWM
, ROUND((d.bytes - NVL(f.bytes,0))/1024/1024,2) used
, ROUND(decode(f.bytes, NULL,0, f.bytes)/1024/1024,2) free
, ROUND( (d.bytes - GREATEST( NVL(hwm.bytes,0), (d.bytes - NVL(f.bytes,0)) ) ) /1024/1024,2) freeable
, d.INCREMENT_BY/1024/1024 Increment_by
, t.pct_increase pct_increase
, d.file_id
, SUBSTR(d.file_name,1,80) file_name
FROM DBA_DATA_FILES d , DBA_TABLESPACES t
, (SELECT /*+ CHOOSE */ tablespace_name
, file_id
, sum(bytes) bytes
FROM DBA_FREE_SPACE f
WHERE tablespace_name LIKE '&&tablespace_name'
GROUP BY tablespace_name, file_id) f
, (Select /*+ CHOOSE */ file_id,
NVL( max((block_id + blocks - 1 ) * p.value),0) bytes
from dba_extents
, v$system_parameter p
WHERE tablespace_name LIKE '&&tablespace_name'
AND p.name = 'db_block_size'
GROUP BY file_id
) hwm
WHERE t.tablespace_name = d.tablespace_name
AND f.tablespace_name(+) = d.tablespace_name
AND f.file_id(+) = d.file_id
AND hwm.file_id(+) = d.file_id
AND d.tablespace_name LIKE '&&tablespace_name'
UNION ALL
SELECT /*+ CHOOSE */ tf.tablespace_name
, tf.status
, GREATEST(tf.maxbytes,tf.bytes)/1024/1024 maxsize
, (tf.bytes/1024/1024) alloc
, (tf.bytes/1024/1024) HWM
, ROUND(ts.bytes_used/1024/1024) used
, ROUND(ts.bytes_free/1024/1024) free
, ROUND((tf.bytes - tf.bytes)/1024/1024,2) freeable
, tf.INCREMENT_BY/1024/1024 Increment_by
, 0 pct_increase
, tf.file_id
, SUBSTR(tf.file_name,1,80) file_name
FROM dba_temp_files tf, V$TEMP_SPACE_HEADER ts
WHERE ts.file_id = tf.file_id
and ts.tablespace_name LIKE '&&tablespace_name'
) c
WHERE freeable > 100
AND file_name LIKE '+%'
ORDER BY freeable desc, tablespace_name,c.file_id asc;
--****************************************
-- SCAN listener
--***************************************
for i in 1 2 3 ;
do
server=`crsctl stat res ora.LISTENER_SCAN$i.lsnr |grep STATE|awk '{print $NF}'`;
ssh $server "grep '06-MAR-2013 07' /u01/app/11.2.0.2/grid/log/diag/tnslsnr/$server/listener_scan$i/trace/listener_scan$i.log | grep 'pr01pimi_adv_grid_con' "
done
To trace SRVCTL issues, set the debug flag on:
export SRVM_TRACE=TRUE
--****************************************
-- SMON and CJQ Memory Leak, increase Linux level process memory
--***************************************
vm.max_map_count = 200000 # (Default is 65536)
cat /proc/sys/vm/max_map_count
echo 200000 > /proc/sys/vm/max_map_count
Process level max PGA memory
*******************************
= _realfree_heap_pagesize_hint * vm.max_map_count
= 64k (default value) * 64k (default value)
= 4GB
Increase either
Bug 11852492 : ORA-4030 OCCURS WHEN PGA EXCEEDS 4GB.
###############################################################
# As per Automatic Memory Management (AMM) on 11g [ID 443746.1]
# To fix ORA-4030 out of process memory errors in SMON process,
# which were resulting in database instance crash
#
vm.max_map_count = 200000
--****************************************
--****************************************
--**** CJQ Workaround
--****************************************
select * From dba_scheduler_running_jobs;
--ORA-01405: Fetched Column Value Is Null For An Advanced Queue [ID 1162862.1]
select owner, name, queue_type, enqueue_enabled, dequeue_enabled from dba_queues
where owner='SYS' and queue_table='SYS$SERVICE_METRICS_TAB' and queue_type='NORMAL_QUEUE';
--Note1: Replace '<owner>.<name>' appropriately with your affected queue table:
exec dbms_aqadm.stop_queue(queue_name => 'SYS.SYS$SERVICE_METRICS');
delete from sys.aq$_SYS$SERVICE_METRICS_TAB_L where msgid ='00000000000000000000000000000000';
commit;
DECLARE
  -- Purge PROCESSED messages from SYS$SERVICE_METRICS_TAB (CJQ/AQ workaround).
  -- FIX: the declaration previously read "rer po dbms_aqadm..." -- the stray
  -- "rer" token made the block invalid PL/SQL.
  po dbms_aqadm.aq$_purge_options_t;
BEGIN
  -- block := TRUE takes an exclusive lock so the purge runs consistently
  po.block := TRUE;
  DBMS_AQADM.PURGE_QUEUE_TABLE(
    queue_table => 'SYS.SYS$SERVICE_METRICS_TAB',
    purge_condition => 'qtview.queue = ''SYS.AQ$SYS$SERVICE_METRICS_TAB''
and qtview.msg_state = ''PROCESSED''',
    purge_options => po);
  commit;
END;
/
select count(*) from SYS.aq$_SYS$SERVICE_METRICS_TAB_L where msgid ='00000000000000000000000000000000';
exec dbms_aqadm.start_queue(queue_name => 'SYS.SYS$SERVICE_METRICS')
select owner, name, queue_type, enqueue_enabled, dequeue_enabled from dba_queues
where owner='SYS' and queue_table='SYS$SERVICE_METRICS_TAB' and queue_type='NORMAL_QUEUE';
select * From dba_scheduler_running_jobs;
###########################
# Database Events
###########################
--------------------------------------
--dNFS (Direct NFS) releated events
--------------------------------------
event "19392 trace name context forever, level 8" # kgnfs (dNFs)
event "19394 trace name context forever, level 8" # skgnfs (Normal NFS)
event "19396 trace name context forever, level 2" # kgodm (ODM Related)
event "19398 trace name context forever, level 128" #mount errors
alter system set
event = '19392 trace name context forever, level 8','19394 trace name context forever, level 8','19396 trace name context forever, level 2','19398 trace name context forever, level 128'
sid='*' scope=spfile ;
Network NSLOOKUP
##################
#Reverse Lookup
##############
# Reverse-lookup every host in 144.77.0.0/16 and flag hosts that answer ping.
# FIX: the ping status test was written as "[ `...` ] -eq 0 ]" -- a misplaced
# "]" plus an unquoted command substitution, which is a bash syntax error.
# Quote the substitution and use a single test, matching the working
# 10.201.119.x and 192.168.x.x loops later in this file.
for i in {1..254};
do
for j in {1..254};
do
host="144.77.$i.$j"
echo $host - ` nslookup $host |grep name |cut -d= -f2 ` - $(if [ "`ping -c1 -i1 -w1 $host >/dev/null 2>&1 ; echo $?`" -eq 0 ] ; then echo Pingable ; fi;)
done
done
for i in {1..254};
do
for j in {1..254};
do
host="192.168.$i.$j"
echo $host - ` nslookup $host |grep name |cut -d= -f2 `
done
done
for j in {1..254}
do
host="10.201.119.$j"
echo $host - ` nslookup $host |grep name |cut -d= -f2 ` - $(if [ "`ping -c1 -w1 $host >/dev/null 2>&1 ; echo $?`" -eq 0 ] ; then echo Pingable ; fi;)
done
%
# Filter for specific named hosts
for j in {1..254};
do
for i in 120 129
do
host="192.168.$i.$j";
if [[ "`nslookup $host |grep name |cut -d= -f2`" == *dm01* ]];
then
echo $host - ` nslookup $host |grep name |cut -d= -f2 ` - $(if [ "`ping -c1 -w1 $host >/dev/null 2>&1 ; echo $?`" -eq 0 ] ; then echo Pingable ; fi;)
fi
done
done
HangAnalyze
#################
Oracle Performance Diagnostic Guide (OPDG) (Doc ID 390374.1)
How to Collect Diagnostics for Database Hanging Issues (Doc ID 452358.1)
sqlplus -prelim '/ as sysdba'
oradebug setorapname diag
oradebug unlimit
oradebug -g all hanganalyze 3
oradebug -g all dump systemstate 258
-- <<wait for 3 mins>>
oradebug -g all hanganalyze 3
oradebug -g all dump systemstate 258
-- Upload the *diag* process traces from all the instances
########################################
Generate Service creation commands
########################################
########################################
# 11g
########################################
# Regenerate "srvctl add service" / "srvctl remove service" commands (11g
# short-option syntax) from the live configuration of $database, by parsing
# the output of "srvctl config service".
# NOTE(review): every value keeps the leading space left behind by
# "cut -d: -f2", and a service with an empty "Available instances" line
# produces "-a " with no argument — review the echoed commands before use.
database=ts01mtgz
#tmpfile=/export/ora_stage/vishal/${database}_services.txt
tmpfile=/tmp/${database}_services.txt
srvctl config service -d $database > $tmpfile
for i in `cat $tmpfile|grep 'Service name'|cut -d: -f2`;
do
# -A18 pulls the ~18 attribute lines that follow each "Service name:" header.
servicename="`grep -A18 "Service name: $i" $tmpfile|grep 'Service name'|cut -d: -f2`";
preferred_instances="`grep -A18 "Service name: $i" $tmpfile|grep 'Preferred instances'|cut -d: -f2`";
available_instances="`grep -A18 "Service name: $i" $tmpfile|grep 'Available instances'|cut -d: -f2`";
service_role="`grep -A18 "Service name: $i" $tmpfile|grep 'Service role'|cut -d: -f2`";
aq_notification="`grep -A18 "Service name: $i" $tmpfile|grep 'AQ HA notifications'|cut -d: -f2`";
failover_type="`grep -A18 "Service name: $i" $tmpfile|grep 'Failover type'|cut -d: -f2`";
failover_method="`grep -A18 "Service name: $i" $tmpfile|grep 'Failover method'|cut -d: -f2`";
failover_delay="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF failover delay'|cut -d: -f2`";
failover_retries="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF failover retries'|cut -d: -f2`";
taf_policy="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF policy specification'|cut -d: -f2`";
clb_goal="`grep -A18 "Service name: $i" $tmpfile|grep 'Connection Load Balancing Goal'|cut -d: -f2`";
rlb_goal="`grep -A18 "Service name: $i" $tmpfile|grep 'Runtime Load Balancing Goal'|cut -d: -f2`";
dtp_transaction="`grep -A18 "Service name: $i" $tmpfile|grep 'DTP transaction'|cut -d: -f2`";
### Display commands for adding services
echo "srvctl add service -d $database -s $servicename -r $preferred_instances -a $available_instances -P $taf_policy -l $service_role -e $failover_type -m $failover_method -w $failover_delay -z $failover_retries -j $clb_goal -B $rlb_goal -x $dtp_transaction -q $aq_notification"
### Display commands for relocating services
#echo "srvctl modify service -d $database -s $servicename -n -i $preferred_instances -a $available_instances "
done
### Display commands for removing services
for i in `cat $tmpfile|grep 'Service name'|cut -d: -f2`;
do
echo srvctl remove service -d $database -s $i
done
Usage: srvctl add service -d <db_unique_name> -s <service_name> {-r "<preferred_list>" [-a "<available_list>"] [-P {BASIC | NONE | PRECONNECT}] | -g <pool_name> [-c {UNIFORM | SINGLETON}] } [-k <net_num>] [-l [PRIMARY][,PHYSICAL_STANDBY][,LOGICAL_STANDBY][,SNAPSHOT_STANDBY]] [-y {AUTOMATIC | MANUAL}] [-q {TRUE|FALSE}] [-x {TRUE|FALSE}] [-j {SHORT|LONG}] [-B {NONE|SERVICE_TIME|THROUGHPUT}] [-e {NONE|SESSION|SELECT}] [-m {NONE|BASIC}] [-z <failover_retries>] [-w <failover_delay>] [-t <edition>] [-f]
-d <db_unique_name> Unique name for the database
-s <service> Service name
-r "<preferred_list>" Comma separated list of preferred instances
-a "<available_list>" Comma separated list of available instances
-g <pool_name> Server pool name
-c {UNIFORM | SINGLETON} Service runs on every active server in the server pool hosting this service (UNIFORM) or just one server (SINGLETON)
-k <net_num> network number (default number is 1)
-P {NONE | BASIC | PRECONNECT} TAF policy specification
-l <role> Role of the service (primary, physical_standby, logical_standby, snapshot_standby)
-y <policy> Management policy for the service (AUTOMATIC or MANUAL)
-e <Failover type> Failover type (NONE, SESSION, or SELECT)
-m <Failover method> Failover method (NONE or BASIC)
-w <integer> Failover delay
-z <integer> Failover retries
-t <edition> Edition (or "" for empty edition value)
-j <clb_goal> Connection Load Balancing Goal (SHORT or LONG). Default is LONG.
-B <Runtime Load Balancing Goal> Runtime Load Balancing Goal (SERVICE_TIME, THROUGHPUT, or NONE)
-x <Distributed Transaction Processing> Distributed Transaction Processing (TRUE or FALSE)
-q <AQ HA notifications> AQ HA notifications (TRUE or FALSE)
Usage: srvctl add service -d <db_unique_name> -s <service_name> -u {-r "<new_pref_inst>" | -a "<new_avail_inst>"} [-f]
-d <db_unique_name> Unique name for the database
-s <service> Service name
-u Add a new instance to service configuration
-r <new_pref_inst> Name of new preferred instance
-a <new_avail_inst> Name of new available instance
-f Force the add operation even though a listener is not configured for a network
-h Print usage
Service name: bt01pimi_tools_con
Service is enabled
Server pool: bt01pimi_bt01pimi_tools_con
Cardinality: 3
Disconnect: false
Service role: PRIMARY
Management policy: AUTOMATIC
DTP transaction: false
AQ HA notifications: false
Failover type: SESSION
Failover method: BASIC
TAF failover retries: 0
TAF failover delay: 0
Connection Load Balancing Goal: LONG
Runtime Load Balancing Goal: NONE
TAF policy specification: BASIC
Edition:
Preferred instances: bt01pimi1,bt01pimi2,bt01pimi3
Available instances:
#############################################
# Generate script from 11g output for 12c
#############################################
# Regenerate "srvctl add service" / "srvctl remove service" commands from
# the live configuration of $database, emitting 12c long-option syntax
# (-db/-service/-preferred/...) as this section's title says.
# The original echoed the deprecated 11g short options (-d/-s/-r/...);
# the flags below match the 12c usage text pasted further down this file.
# NOTE(review): values keep the leading space from "cut -d: -f2", and an
# empty "Available instances" yields "-available" with no argument —
# review the echoed commands before running them.
database=al01pimn
#tmpfile=/export/ora_stage/vishal/${database}_services.txt
tmpfile=/tmp/${database}_services.txt
srvctl config service -d $database > $tmpfile
for i in `cat $tmpfile|grep 'Service name'|cut -d: -f2`;
do
# -A18 pulls the ~18 attribute lines that follow each "Service name:" header.
servicename="`grep -A18 "Service name: $i" $tmpfile|grep 'Service name'|cut -d: -f2`";
preferred_instances="`grep -A18 "Service name: $i" $tmpfile|grep 'Preferred instances'|cut -d: -f2`";
available_instances="`grep -A18 "Service name: $i" $tmpfile|grep 'Available instances'|cut -d: -f2`";
service_role="`grep -A18 "Service name: $i" $tmpfile|grep 'Service role'|cut -d: -f2`";
aq_notification="`grep -A18 "Service name: $i" $tmpfile|grep 'AQ HA notifications'|cut -d: -f2`";
failover_type="`grep -A18 "Service name: $i" $tmpfile|grep 'Failover type'|cut -d: -f2`";
failover_method="`grep -A18 "Service name: $i" $tmpfile|grep 'Failover method'|cut -d: -f2`";
failover_delay="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF failover delay'|cut -d: -f2`";
failover_retries="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF failover retries'|cut -d: -f2`";
taf_policy="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF policy specification'|cut -d: -f2`";
clb_goal="`grep -A18 "Service name: $i" $tmpfile|grep 'Connection Load Balancing Goal'|cut -d: -f2`";
rlb_goal="`grep -A18 "Service name: $i" $tmpfile|grep 'Runtime Load Balancing Goal'|cut -d: -f2`";
dtp_transaction="`grep -A18 "Service name: $i" $tmpfile|grep 'DTP transaction'|cut -d: -f2`";
### Display commands for adding services (12c long-option syntax)
echo "srvctl add service -db $database -service $servicename -preferred $preferred_instances -available $available_instances -tafpolicy $taf_policy -role $service_role -failovertype $failover_type -failovermethod $failover_method -failoverdelay $failover_delay -failoverretry $failover_retries -clbgoal $clb_goal -rlbgoal $rlb_goal -dtp $dtp_transaction -notification $aq_notification"
### Display commands for relocating services
#echo "srvctl modify service -db $database -service $servicename -modifyconfig -preferred $preferred_instances -available $available_instances "
done
### Display commands for removing services
for i in `cat $tmpfile|grep 'Service name'|cut -d: -f2`;
do
echo srvctl remove service -db $database -service $i
done
Usage: srvctl add service -db <db_unique_name> -service <service_name> {-preferred "<preferred_list>" [-available "<available_list>"] [-tafpolicy {BASIC | NONE | PRECONNECT}] | -serverpool <pool_name> [-cardinality {UNIFORM | SINGLETON}] } [-netnum <network_number>] [-role "[PRIMARY][,PHYSICAL_STANDBY][,LOGICAL_STANDBY][,SNAPSHOT_STANDBY]"] [-policy {AUTOMATIC | MANUAL}] [-notification {TRUE|FALSE}] [-dtp {TRUE|FALSE}] [-clbgoal {SHORT|LONG}] [-rlbgoal {NONE|SERVICE_TIME|THROUGHPUT}] [-failovertype {NONE|SESSION|SELECT|TRANSACTION}] [-failovermethod {NONE|BASIC}] [-failoverretry <failover_retries>] [-failoverdelay <failover_delay>] [-edition <edition>] [-pdb <pluggable_database>] [-global {TRUE|FALSE}] [-maxlag <max_lag_time>] [-sql_translation_profile <sql_translation_profile>] [-commit_outcome {TRUE|FALSE}] [-retention <retention>] [-replay_init_time <replay_initiation_time>] [-session_state {STATIC|DYNAMIC}] [-pqservice <pq_service>] [-pqpool <pq_pool_list>] [-gsmflags <gsm_flags>] [-force] [-eval] [-verbose]
-db <db_unique_name> Unique name for the database
-service <service> Service name
-preferred "<preferred_list>" Comma separated list of preferred instances
-available "<available_list>" Comma separated list of available instances
-serverpool <pool_name> Server pool name
-cardinality (UNIFORM | SINGLETON) Service runs on every active server in the server pool hosting this service (UNIFORM) or just one server (SINGLETON)
-netnum <network_number> Network number (default number is 1)
-tafpolicy (NONE | BASIC | PRECONNECT) TAF policy specification
-role <role> Role of the service (primary, physical_standby, logical_standby, snapshot_standby)
-policy <policy> Management policy for the service (AUTOMATIC or MANUAL)
-failovertype (NONE | SESSION | SELECT | TRANSACTION) Failover type
-failovermethod (NONE | BASIC) Failover method
-failoverdelay <failover_delay> Failover delay (in seconds)
-failoverretry <failover_retries> Number of attempts to retry connection
-edition <edition> Edition (or "" for empty edition value)
-pdb <pluggable_database> Pluggable database name
-maxlag <maximum replication lag> Maximum replication lag time in seconds (Non-negative integer, default value is 'ANY')
-clbgoal (SHORT | LONG) Connection Load Balancing Goal. Default is LONG.
-rlbgoal (SERVICE_TIME | THROUGHPUT | NONE) Runtime Load Balancing Goal
-dtp (TRUE | FALSE) Distributed Transaction Processing
-notification (TRUE | FALSE) Enable Fast Application Notification (FAN) for OCI connections
-global <global> Global attribute (TRUE or FALSE)
-preferred <new_pref_inst> Name of new preferred instance
-available <new_avail_inst> Name of new available instance
-sql_translation_profile <sql_translation_profile> Specify a database object for SQL translation profile
-commit_outcome (TRUE | FALSE) Commit outcome
-retention <retention> Specifies the number of seconds the commit outcome is retained
-replay_init_time <replay_init_time> Seconds after which replay will not be initiated
-session_state <session_state> Session state consistency (STATIC or DYNAMIC)
-pqservice <pq_service> Parallel query service name
-pqpool <pq_pool> Parallel query server pool name
-gsmflags <gsm_flags> Set locality and region failover values
-eval Evaluates the effects of event without making any changes to the system
Usage: srvctl add service -db <db_unique_name> -service <service_name> -update {-preferred "<new_pref_inst>" | -available "<new_avail_inst>"} [-force] [-verbose]
-db <db_unique_name> Unique name for the database
-service <service> Service name
-update Add a new instance to service configuration
-preferred <new_pref_inst> Name of new preferred instance
-available <new_avail_inst> Name of new available instance
-force Force the add operation even though a listener is not configured for a network
-verbose Verbose output
-help Print usage
Service name: bt01pimi_tools_con
Service is enabled
Server pool: bt01pimi_bt01pimi_tools_con
Cardinality: 3
Disconnect: false
Service role: PRIMARY
Management policy: AUTOMATIC
DTP transaction: false
AQ HA notifications: false
Failover type: SESSION
Failover method: BASIC
TAF failover retries: 0
TAF failover delay: 0
Connection Load Balancing Goal: LONG
Runtime Load Balancing Goal: NONE
TAF policy specification: BASIC
Edition:
Preferred instances: bt01pimi1,bt01pimi2,bt01pimi3
Available instances:
#################################################################################
# Oracle Binary Options & Direct FNS (dNFS) setting
# MOS Note ID: 948061.1 - How to Check and Enable/Disable Oracle Binary Options
#################################################################################
#--- List the object files for different options
#-- Object files for turning Options ON and OFF
grep _ON $ORACLE_HOME/rdbms/lib/env_rdbms.mk |grep OBJ_EXT
grep _OFF $ORACLE_HOME/rdbms/lib/env_rdbms.mk |grep OBJ_EXT
# Report which Oracle binary options are linked ON or OFF, by listing the
# object files inside $ORACLE_HOME/rdbms/lib/libknlopt.a (each option has a
# distinct "on" and "off" object file) plus the odm_init symbol found in
# libodm??.so (Direct NFS). Unrecognized object names fall through and are
# printed without a status. See MOS note 948061.1 referenced above.
FORMAT="%35s \t %3s \n"
for i in `strings $ORACLE_HOME/lib/libodm??.so | grep odm_init | awk '{print $NF}' | cut -d" " -f1` `ar -t $ORACLE_HOME/rdbms/lib/libknlopt.a |awk '{print $NF}' `
do
case $i in
kfon.o) printf "$FORMAT" "Oracle ASM" "ON";;
kfoff.o) printf "$FORMAT" "Oracle ASM" "OFF";;
kciwcx.o) printf "$FORMAT" "Oracle Context" "ON";;
kcincx.o) printf "$FORMAT" "Oracle Context" "OFF";;
dmwdm.o) printf "$FORMAT" "Oracle Data Mining" "ON";;
dmndm.o) printf "$FORMAT" "Oracle Data Mining" "OFF";;
ktd.o) printf "$FORMAT" "Oracle DDB" "ON";;
ksnktd.o) printf "$FORMAT" "Oracle DDB" "OFF";;
kzvidv.o) printf "$FORMAT" "Oracle Database Vault" "ON";;
kzvndv.o) printf "$FORMAT" "Oracle Database Vault" "OFF";;
# dNFS is detected from the odm_init symbol name, not a libknlopt member.
kgodm_init) printf "$FORMAT" "Oracle Direct NFS" "ON";;
odm_init) printf "$FORMAT" "Oracle Direct NFS" "OFF";;
jox.o) printf "$FORMAT" "Oracle JOX" "ON";;
joxoff.o) printf "$FORMAT" "Oracle JOX" "OFF";;
kprwts.o) printf "$FORMAT" "Oracle KPT" "ON";;
kprnts.o) printf "$FORMAT" "Oracle KPT" "OFF";;
kzlilbac.o) printf "$FORMAT" "Oracle Label Security" "ON";;
kzlnlbac.o) printf "$FORMAT" "Oracle Label Security" "OFF";;
xsyeolap.o) printf "$FORMAT" "Oracle OLAP" "ON";;
xsnoolap.o) printf "$FORMAT" "Oracle OLAP" "OFF";;
kkpoban.o) printf "$FORMAT" "Oracle Partitioning" "ON";;
ksnkkpo.o) printf "$FORMAT" "Oracle Partitioning" "OFF";;
kkxwtp.o) printf "$FORMAT" "Oracle PL/SQL" "ON";;
kkxntp.o) printf "$FORMAT" "Oracle PL/SQL" "OFF";;
kcsm.o) printf "$FORMAT" "Oracle Real Application Cluster" "ON";;
ksnkcs.o) printf "$FORMAT" "Oracle Real Application Cluster" "OFF";;
kecwr.o) printf "$FORMAT" "Oracle Real Application Testing" "ON";;
kecnr.o) printf "$FORMAT" "Oracle Real Application Testing" "OFF";;
kxmwsd.o) printf "$FORMAT" "Oracle SDO" "ON";;
kxmnsd.o) printf "$FORMAT" "Oracle SDO" "OFF";;
kdzof.o) printf "$FORMAT" "Oracle ZDK" "OFF";;
sllfls.o) printf "$FORMAT" "Oracle LIL" "OFF";;
xaonsl.o) printf "$FORMAT" "Oracle XAS" "OFF";;
xaondy.o) printf "$FORMAT" "Oracle XAD" "OFF";;
*) printf "$FORMAT" "$i" "" ;;
esac
done |sort
/*------------------------------------------------------------------------------------
Convert Physical standby to Snapshot standby
Ref - http://docs.oracle.com/cd/B28359_01/server.111/b28294/manage_ps.htm#BACIEJJI
------------------------------------------------------------------------------------*/
# Convert a physical standby into a (read/write) snapshot standby:
# stop redo apply first, convert while MOUNTed, then open the database.
sqlplus / as sysdba <<EOF
alter database recover managed standby database cancel;
shutdown immediate
startup mount
alter database convert to snapshot standby;
shutdown immediate
startup
EOF
/*------------------------------------------------------------------------------------
Convert Snapshot standby to Physical standby
------------------------------------------------------------------------------------*/
# Convert a snapshot standby back to a physical standby. The conversion
# must be done while MOUNTed; after the final restart redo apply is
# (re)started in the background. Fix: the original ended with
# "... recover managed standby database cancel", which STOPS apply —
# the correct final step is to start it with "disconnect from session".
sqlplus / as sysdba <<EOF
shutdown immediate
startup mount
alter database convert to physical standby;
shutdown immediate
startup mount
alter database recover managed standby database disconnect from session;
EOF
/*------------------------------------------------------------------------------------
Disable Redo Thread
------------------------------------------------------------------------------------*/
-- Generate DDL to disable redo threads whose status is CLOSED, and to drop
-- the online log groups belonging to threads that are already DISABLED.
select 'alter database disable thread ' || thread# ||';' from v$thread t where status = 'CLOSED';
select 'alter database drop logfile group ' || group# || ';' from v$log l , v$thread t where l.thread# = t.thread# and t.enabled ='DISABLED';
/*------------------------------------------------------------------------------------
Check if background process is FATAL or not (Linux)
If there is FATAL in SKGP_HIDDEN_ARGS, then its critical for instance otherwise its not critical.
------------------------------------------------------------------------------------*/
ps auxwwwe |grep ora_smon_pr01pimi3 |grep -v grep |awk -F"SKGP_HIDDEN_ARGS=" '{print $2}'
ps auxwwwe |grep ora_p000_pr01pimi3 |grep -v grep |awk -F"SKGP_HIDDEN_ARGS=" '{print $2}'
<FATAL/S/SMON/x0/x1C/x0/x1A1F059A/98085/97704/x1B/x1D/x1/x1A1F0C98/1396114691/1396114691> 0 SKGP_SPAWN_DIAG_PRE_EXEC_TS=1396114691
<BG/S/P001/x0/xA9/x242F/x1A1F059A/98085/56780/x5C9DB/xAB/xD6/x13814058/1400299289/1400299289> 0 SKGP_SPAWN_DIAG_PRE_EXEC_TS=1400299289
/*------------------------------------------------------------------------------------
Dead Transaction Recovery (Rollback)
------------------------------------------------------------------------------------*/
-- Dead transactions awaiting rollback, per instance, smallest remaining
-- undo first (x$ktuxe: transaction table entries; DEAD flag = being
-- recovered by SMON).
SELECT inst_id
     , ktuxeusn USN
     , ktuxeslt Slot
     , ktuxesqn Seq
     , ktuxesta State
     , ktuxesiz Undo
     , ktuxecfl
FROM x$ktuxe
WHERE ktuxesta <> 'INACTIVE'
  AND ktuxecfl LIKE '%DEAD%'
ORDER BY ktuxesiz;
-- Rollback/undo segments that host the dead transactions found above.
select useg.segment_name, useg.segment_id, useg.tablespace_name, useg.status
from dba_rollback_segs useg
where useg.segment_id in (select unique ktuxeusn
from x$ktuxe
where ktuxesta <> 'INACTIVE'
and ktuxecfl like '%DEAD%');
-- Fast-start (parallel) transaction recovery progress, with an ETA derived
-- from the undo-blocks-per-cpu-second rate ('unknown' until cputime > 0).
select usn, state, undoblockstotal "Total", undoblocksdone "Done", undoblockstotal-undoblocksdone "ToDo",
decode(cputime,0,'unknown',sysdate+(((undoblockstotal-undoblocksdone) / (undoblocksdone / cputime)) / 86400)) "Estimated time to complete"
from v$fast_start_transactions;
/*------------------------------------------------------------------------------------
DFS lock handle
Ref :
Doc ID 34631.1 - CI Lock "Cross-Instance Call Invocation"
Doc ID 34566.1 - WAITEVENT: "enqueue" Reference Note
------------------------------------------------------------------------------------*/
-- Decode sessions waiting on 'DFS lock handle': extract the 2-char lock
-- type and requested mode packed into P1, and for CI (cross-instance call)
-- locks translate the call id into its meaning (per MOS 34631.1).
select sw.sid
, sw.inst_id
-- P1 packs: high 2 bytes = lock type ASCII chars, low 2 bytes = mode.
, chr(bitand(sw.p1,-16777216)/16777215)
|| chr(bitand(sw.p1,16711680)/65535) "Lock"
, lt.description "Lock Name"
, DECODE(bitand(sw.p1, 65535)
,0,'none'
,1,'null (NULL)'
,2,'row-S (SS)'
,3,'row-X (SX)'
,4,'share (S)'
,5,'S/Row-X (SSX)'
,6,'exclusive (X)'
,bitand(sw.p1, 65535)
) "Mode"
, DECODE(chr(bitand(sw.p1,-16777216)/16777215)|| chr(bitand(sw.p1,16711680)/65535)
,'CI', DECODE(sw.p2
, '0','Checkpoint block range'
, '1','Reuse (checkpoint and invalidate) block range'
, '2','LGWR Checkpointing and Hot Backup'
, '3','DBWR syncronization of SGA with control file'
, '4','Log file add/drop/rename notification'
, '5','Write buffer for CR read'
, '6','Test call'
, '7','Invalidate KCK cache in all instances'
, '8','Alter rollback segment optimal'
, '9','Signal Query Servers/coordinator'
, '10','Create Remote parallel query Server'
, '11','Set Global Partitions'
, '12','Stop Disk Writes'
, '13','Drop Sort Segments'
, '14','Release unused space from Sort Segments'
, '15','Instance Recovery for Parallel operation Group'
, '16','Validate parallel slave Lock Value'
, '17','Check transaction state objects'
, '18','Object reuse request'
, '19','Rolling release checks'
, '20','Propagate begin backup scn for a file'
, '21','Refresh top plan (for db scheduler)'
, '22','Clear checkpoint progress record'
, '23','Drop temp file'
, '24','Quiesce database Restricted '
, '25','Update Dscn Tracking (ktcndt)'
, '26','Purge dictionary Object number Cache'
, '27','set Database Force Logging mode'
, '28','invalidate cached file address translations'
, '29','Cursor Unauthorize Mode'
, '30','process waiters after row cache requeue'
, '31','Active Change Directory extent relocation'
, '32','block change tracking state change'
, '33','kgl mulitversion obsolete'
, '34','set previous resetlogs data'
, '35','set recovery destination pointer'
, '36','fast object reuse request'
, '37','test ksbcic()'
, '38','ASM diskgroup discovery wait'
, '39','ASM diskgroup release'
, '40','ASM push DB update'
, '41','ASM add ACD chunk'
, '42','ASM map resize message '
, '43','ASM map lock message'
, '44','ASM map unlock message (phase 1)'
, '45','ASM map unlock message (phase 2)'
, '46','ASM generate add disk redo marker'
, '47','ASM check of PST validity'
, '48','ASM offline disk CIC'
, '49','Logical Standby Sync Point SCN'
, '50','update SQL Tuning Base existence bitvector'
, '51','PQ induced Checkpointing '
, '52','ASM F1X0 relocation'
, '53','Scheduler autostart'
, '54','KZS increment grant/revoke counter'
, '55','ASM disk operation message'
, '56','ASM I/O error emulation'
, '57','DB Supp log cursor invalidation'
, '58','Cache global range invalidation'
, '59','Cache global object invalidation'
, '60','ASM Pre-Existing Extent Lock wait'
, '61','Perform a ksk action through DBWR'
, '62','ASM diskgroup refresh wait '
, '63','KCBO object checkpoint'
, '64','KCBO object pq checkpoint'
, '65','global health check event '
, '66','Oracle Label Security refresh '
, '67','thread internal enable'
, '68','cross-instance registration'
, '69','KGL purge unused subheaps'
, '70','clear pin instance flag '
, '71','Rolling operations CIC'
, '72',''
, '73','readable stby: metadata flush'
, '74','slave dskm message'
, '75','update sysaux state sga variable'
, '76','notify storage server reg/dereg'
, '77','ASM ACD relocation xtnt info'
, '78','Misc LogMiner CIC'
, '79',''
, '101','ASM disk error emulation'
, '102','Audit Management operations'
, '103','invalidate sql profile cursors'
, '104','Misc RMAN CIC'
, '105','standby role change'
, '106','Voting file refresh CIC'
, '107','LGWR miscellaneous'
, '108','Invalidate ASM spfile xmap CIC'
, '109','load new system statistics'
, '110','sync datafile move state'
, '111','KSB Rolling Migration Test1'
, '112','ASM map lock message for AVD'
, '113','Oracle Label Security install'
, '114','Oracle Label Security enforce'
, '115','Oracle Label Security config'
, '116','Oracle Database Vault config'
, '117','Invalidate DB Props cache CIC'
, '118','OBSOLETE'
, '119','Propagate final ODPS'
, '120','Post for cleanup of Audit'
, '121','Assist offline from the DB side'
, '122','Assist offline from the ASM side'
, '123','Nuke ILM file stats'
, '124','12.1 RM CIC in RMON'
, sw.p2)
, sw.p2) id1
-- NOTE(review): this second decode also reads sw.p2; for an "id2" column
-- it looks like it should decode sw.p3 — confirm against MOS 34631.1.
, DECODE(chr(bitand(sw.p1,-16777216)/16777215)|| chr(bitand(sw.p1,16711680)/65535)
,'CI', DECODE(sw.p2
, '0x01','Used to pass in parameters'
, '0x02','Used to invoke the function in backgroud process'
, '0x03','Used to indicate the foreground has not returned'
, '0x04','Mounted excl, use to allocate mechanism'
, '0x05','Used to queue up interested clients '
, sw.p2)
, sw.p2
) id2
from gv$session_wait sw
LEFT OUTER JOIN v$lock_type lt ON lt.type = chr(bitand(sw.p1,-16777216)/16777215)|| chr(bitand(sw.p1,16711680)/65535)
where sw.event = 'DFS lock handle';
--#############################
-- Enable/Disable Services
--#############################
# Print the srvctl commands to disable+stop and then enable+start every
# service of $DBNAME registered on $HOSTNAME (service names parsed from the
# CRS resource listing; nothing is executed — commands are only echoed).
# Refactor: the original repeated the same crsctl pipeline four times, once
# per action; the output (disable set, stop set, enable set, start set) is
# unchanged.
DBNAME=pr01pimi
HOSTNAME=dm01db11
for action in disable stop enable start
do
  for svc in `crsctl stat res -w "NAME co $DBNAME"|egrep -A4 .svc |egrep 'STATE|^NAME' |egrep -B1 $HOSTNAME |grep 'NAME' |cut -d. -f3`
  do
    echo srvctl $action service -d $DBNAME -s $svc -n $HOSTNAME
  done
done
-----------------------------
-- Recovery Session Waits
-----------------------------
-- Current non-idle waits of managed-recovery sessions (program like
-- '%(PR%'); when the wait's P1 is a log#, also show that archived log's
-- FIRST_TIME (NOTE(review): assumes sw.p1 holds a sequence# for those
-- waits — confirm for the specific event).
SELECT p.pid, s.program
--,s.module, s.action
, DECODE(sw.p1text, 'log#',(select max(first_Time) from v$archived_log l where l.sequence# = sw.p1 ),'') archlog_first_Time
, sw.*
FROM gv$session_wait sw
JOIN gv$session s ON s.inst_id = sw.inst_id and sw.sid = s.sid and s.program like '%'
JOIN gv$process p ON s.inst_id = p.inst_id and s.paddr = p.addr
where sw.wait_class <> 'Idle'
and s.program like '%(PR%'
order by s.program
;
-----------------------------
-- Parallel Recovery process
-----------------------------
-- Per parallel-recovery slave (PR* processes): which archived log
-- (thread#/sequence#/first_time) and block range it is currently waiting
-- on, decoded from the wait's P1/P2/P3 when P1 is a log#.
SELECT p.inst_id
, p.pname
, DECODE(sw.p1text, 'log#',(select thread# from v$archived_log l2
where l2.first_time = (select max(first_Time) from v$archived_log l where l.sequence# = sw.p1 )
and l2.sequence# = sw.p1)
,'') thread#
, DECODE(sw.p1text, 'log#',sw.p1 ,'') sequence#
, DECODE(sw.p1text, 'log#',(select max(first_Time) from v$archived_log l where l.sequence# = sw.p1 ),'') archlog_first_Time
, DECODE(sw.p1text, 'log#',sw.p2 ,'') block#
, DECODE(sw.p1text, 'log#',sw.p3 ,'') blocks
, sw.event
, sw.p1text
, sw.p1
FROM gv$session_wait sw
JOIN gv$session s ON s.inst_id = sw.inst_id and sw.sid = s.sid and s.program like '%'
JOIN gv$process p ON s.inst_id = p.inst_id and s.paddr = p.addr
where sw.wait_class <> 'Idle'
and s.program like '%(PR%'
order by s.program
;
-----------------------------
-- Recovery Session Events
-----------------------------
-- Cumulative non-idle wait events for managed-recovery (PR*) sessions,
-- highest time_waited first.
-- Fix: gv$process was joined on "p.inst_id = p.inst_id" (always true),
-- which duplicated rows across instances; join on the session's inst_id.
select s.program, se.*
From gv$session_event se
JOIN gv$session s ON s.inst_id = se.inst_id and se.sid = s.sid
JOIN gv$process p ON p.inst_id = s.inst_id and s.paddr = p.addr
where s.program like '%(PR%'
and se.wait_class not in ('Idle')
order by se.time_waited desc
;
-- Standby / media-recovery progress scratch queries.
SELECT * FROM gv$managed_standby;
SELECT * FROM gv$recovery_progress;
select * From v$recovery_status;
select * From V$RECOVERY_FILE_STATUS;
select * From V$instance_recovery;
-- NOTE(review): the line below duplicates the previous query (kept as-is).
select * From V$instance_recovery;
-- Check whether one specific archived log sequence was applied/deleted.
SELECT thread#, sequence#, status, applied,archived, deleted, first_Time, next_time FROM v$archived_log where sequence# = 156987 order by first_time desc;
-----------------------------
-- Get Latch details
--
-- Ref
-- http://www.pythian.com/blog/tuning-latch-contention-cache-buffers-chain-latches/
-- http://blog.tanelpoder.com/2010/02/15/new-versions-of-latchprof-and-latchprofx-for-latch-contention-troubleshooting-and-tuning/
-----------------------------
-- Get latch Summary
set lines 1000
set verify off
@latchprof name % % 1000
@latchprof name,sid % % 1000
@latchprof name,sid % "cache buffers chains" 1000
@latchname 186
@latchname "%cache%"
-- Get latch details
-- Sessions currently waiting on a latch, joined to the child-latch row
-- whose address matches the session's P1RAW (the latch address parameter).
-- Fix: "s.userame" was a typo (ORA-00904); corrected to s.username.
-- NOTE(review): comparing l.addr (RAW) to RAWTOHEX(s.P1RAW) (VARCHAR2) is
-- kept from the original — verify the match works on your version; many
-- variants simply use l.addr = s.p1raw.
select /*+ leading(s) USE_NL(s l) */
l.*
, s.sid
, s.inst_id
, s.username
, s.osuser
, s.program
From gv$session s
JOIN gV$LATCH_CHILDREN l ON l.inst_id = s.inst_id and l.addr = RAWTOHEX(s.P1RAW)
WHERE s.event like '%latch%'
;
---------------------------------------
-- Dynamic Object Block Remastering
---------------------------------------
-- Show which instance currently masters each object's GCS resources
-- (dynamic remastering), with the previous master and remaster count.
SELECT o.owner
, o.object_name
, o.subobject_name
, o.object_type
, f.tablespace_name
, g.file_id
, g.gc_mastering_policy
, g.current_master
, g.previous_master
, g.remaster_cnt
FROM v$gcspfmaster_info g
INNER JOIN dba_objects o on o.data_object_id = g.data_object_id
-- Outer join: file_id is not populated for object-mastered entries.
LEFT OUTER JOIN dba_data_files f ON f.file_id = g.file_id
WHERE 1=1
AND o.owner LIKE '%'
AND o.object_name LIKE '%'
;
------------------------------------------
-- Convert comma separated list to rows
-------------------------------------------
-- Split the comma-separated bind :csv_list into one row per token
-- (CONNECT BY LEVEL drives one iteration per delimiter).
SELECT regexp_substr(:csv_list, '[^,]+', 1, LEVEL) token
FROM dual
CONNECT BY instr(:csv_list, ',', 1, LEVEL - 1) > 0
;
------------------------------------------
-- Convert rows to comma separated list
-------------------------------------------
-- Aggregate rows into a comma-separated list with LISTAGG.
select
listagg(name, ',')
within group (order by id) as list
from student;
-- XMLAGG variant: GETCLOBVAL() returns a CLOB, so it is not subject to the
-- VARCHAR2 length limit of the LISTAGG result.
select RTRIM(XMLAGG(XMLELEMENT(E,TABLE_NAME,',').EXTRACT('//text()') ORDER BY TABLE_NAME).GETCLOBVAL(),',') AS TABLE_LIST
from dba_tables;
--------------------------------------------------------------
-- Delete Cluster Health Service , Berkley database file
-- ora.crf resource can be stopped without impacting anything
--------------------------------------------------------------
# Recreate the Cluster Health Monitor Berkeley DB file (ora.crf can be
# stopped without impacting anything, per the note above).
# Fix: the option dashes were mojibake ("<20>"); restored "-init" / "-u".
cd /u01/app/11.2.0.4/grid/crf/db/`hostname -s`/
crsctl stop res ora.crf -init
sudo -u root rm crfclust.bdb
crsctl start res ora.crf -init
crsctl status res ora.crf -init
--------------------------------------------------------------
-- Get Partition name based on high_value
-- Convert LONG to VARCHAR2
--------------------------------------------------------------
-- Find the partition whose HIGH_VALUE (a LONG column, not directly
-- filterable) matches tomorrow's date: round-trip dba_tab_partitions
-- through dbms_xmlgen so the LONG surfaces as a queryable VARCHAR2.
select *
from
xmltable( '/ROWSET/ROW'
passing dbms_xmlgen.getXMLType('
select table_owner
, table_name
, partition_name
, subpartition_count
, high_value /* <---- Long Column type*/
from dba_tab_partitions
WHERE table_owner = ''LEVERAGE_OWN''
AND table_name = ''TEMPLATE_RESULT_DETAILS_CLOSE''
'
)
columns
table_owner varchar2(30)
, table_name varchar2(30)
, partition_name varchar2(30)
, subpartition_count NUMBER
, high_value varchar2(4000)
)
WHERE 1=1
and high_value like '%' || TO_CHAR(trunc(sysdate+1),'YYYY-MM-DD HH24:MI:SS') || '%'
;
--------------------------------------------------------------
-- Automatic Diagnostic Repository (ADR)
--------------------------------------------------------------
# List ADR incidents, newest first.
$ORACLE_HOME/bin/adrci <<EOF
show incident -orderby create_time DESC
EOF
-------------------------------------------------------------------
-- Shrink shared pool
-- (Run on the specific instance for which actions are being taken)
-------------------------------------------------------------------
-- Shrink an oversized shared pool on one instance without a bounce:
-- flush, disable SGA auto-tuning, force the new size, then re-enable
-- automatic SGA management (scope=memory only — nothing is persisted).
alter system flush shared_pool;
alter system set sga_target = 0 scope=memory sid='bt01pimi5';
alter system set shared_pool_size = 10G scope=memory sid='bt01pimi5';
alter system set sga_target = 39G scope=memory sid='bt01pimi5';
-------------------------------------------------------------------
-- ASM Disk Errors
-------------------------------------------------------------------
-- Pivot ASM read+write error counts per cell disk slot (CD_00..CD_11) by
-- failgroup; the slot is parsed out of the disk name.
-- NOTE(review): assumes Exadata-style disk names of the form ..._CD_nn_...
-- (the INSTR/SUBSTR math counts '_' from the right) — confirm for your
-- naming convention.
SELECT failgroup
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_00',read_errs+write_errs,0)) CD_00
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_01',read_errs+write_errs,0)) CD_01
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_02',read_errs+write_errs,0)) CD_02
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_03',read_errs+write_errs,0)) CD_03
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_04',read_errs+write_errs,0)) CD_04
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_05',read_errs+write_errs,0)) CD_05
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_06',read_errs+write_errs,0)) CD_06
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_07',read_errs+write_errs,0)) CD_07
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_08',read_errs+write_errs,0)) CD_08
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_09',read_errs+write_errs,0)) CD_09
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_10',read_errs+write_errs,0)) CD_10
, sum(DECODE(substr(name,INSTR(name,'_',-1,3)+1,5), 'CD_11',read_errs+write_errs,0)) CD_11
FROM gv$asm_disk_stat
where name like '%CD%'
GROUP BY failgroup
ORDER BY 1;
-------------------------------------------------------------------
--- Move LONG
-------------------------------------------------------------------
alter table <tablename> move LOB ( <columnname>.XMLDATA ) STORE AS ( TABLESPACE <tablespacename> );
-------------------------------------------------------------------
--- Move LOB
-------------------------------------------------------------------
alter table <tablename> move LOB ( <columnname>.XMLDATA ) STORE AS ( TABLESPACE <tablespacename> );
-------------------------------------------------------------------
--- Move XMLType
-------------------------------------------------------------------
alter table <tablename> move LOB ( <columnname> ) STORE AS ( TABLESPACE <tablespacename> );
-------------------------------------------------------------------
--- SQL Binds
-------------------------------------------------------------------
-- Captured bind values for a given SQL_ID across all instances.
-- NOTE(review): v$sql_bind_capture is sampled, not continuous — check
-- WAS_CAPTURED / LAST_CAPTURED for how fresh each value is.
SELECT s.inst_id
, s.sql_id
, b.child_number
, b.child_address
, b.name bind_name
, B.DATATYPE_STRING bind_datatype
, b.value_string bind_value
, B.VALUE_ANYDATA
, B.WAS_CAPTURED
, b.last_captured
FROM gv$sql s
JOIN gv$sql_bind_capture b ON b.inst_id = s.inst_id and b.sql_id = s.sql_id
WHERE s.sql_id='c7y4045sx7ax6'
;
-------------------------------------------------------------------
--- To speedup queries against DBA_FREE_SPACE view.
-------------------------------------------------------------------
-- Stale optimizer stats on the fixed table X$KTFBUE and on
-- RECYCLEBIN$ are a common cause of slow DBA_FREE_SPACE queries;
-- refresh both, then re-test with the SYSAUX probe below.
@stats SYS X$KTFBUE
@stats SYS RECYCLEBIN$
exec dbms_stats.gather_table_stats('SYS','X$KTFBUE',degree=>16)
exec dbms_stats.gather_table_stats('SYS','RECYCLEBIN$',degree=>16)
-- Sanity probe: free space (MB) in SYSAUX; should now return quickly.
SELECT NVL(SUM(BYTES),0)/(1024*1024) FREE_BYTES FROM DBA_FREE_SPACE WHERE TABLESPACE_NAME = 'SYSAUX';
-------------------------------------------------------------------
--- Relocate SCAN listener in OEM from one agent to another
-------------------------------------------------------------------
#
# On OEM application server , where weblogic runs.
# Example location of emcli
# $OMS_HOME/oms/bin/emcli
# /u01/oracle/app/product/Middleware/oms12c/oms/bin/emcli
#
emcli help relocate_targets
emcli login<69> -username=<oem_username> -password=<yourpassword>
emcli relocate_targets \
-src_agent=source.hostname.com:1830 \
-dest_agent=target.hostname.com:1830 \
-target_name=LISTENER_SCAN1_dmz \
-target_type=oracle_listener \
-copy_from_src \
-force=yes
-------------------------------------------------------------------
--- ARP cache debugging
-------------------------------------------------------------------
# Check that MAC address of SCAN VIP on client machines matches the MAC address on DB server of underlying bond interface for a SCAN VIP address.
# Display current arp cache on a host
/sbin/arp -h
/sbin/arp -v |sort -k1 # Display dns names of hosts
/sbin/arp -vn |sort -k1 # Display numeric IPs
arping
$ /sbin/arp -h
Usage:
arp [-vn] [<HW>] [-i <if>] [-a] [<hostname>] <-Display ARP cache
arp [-v] [-i <if>] -d <hostname> [pub][nopub] <-Delete ARP entry
arp [-vnD] [<HW>] [-i <if>] -f [<filename>] <-Add entry from file
arp [-v] [<HW>] [-i <if>] -s <hostname> <hwaddr> [temp][nopub] <-Add entry
arp [-v] [<HW>] [-i <if>] -s <hostname> <hwaddr> [netmask <nm>] pub <-''-
arp [-v] [<HW>] [-i <if>] -Ds <hostname> <if> [netmask <nm>] pub <-''-
-a display (all) hosts in alternative (BSD) style
-e display (all) hosts in default (Linux) style
-s, --set set a new ARP entry
-d, --delete delete a specified entry
-v, --verbose be verbose
-n, --numeric don't resolve names
-i, --device specify network interface (e.g. eth0)
-D, --use-device read <hwaddr> from given device
-A, -p, --protocol specify protocol family
-f, --file read new entries from file or from /etc/ethers
<HW>=Use '-H <hw>' to specify hardware address type. Default: ether
List of possible hardware types (which support ARP):
strip (Metricom Starmode IP) ash (Ash) ether (Ethernet)
tr (16/4 Mbps Token Ring) tr (16/4 Mbps Token Ring (New)) ax25 (AMPR AX.25)
netrom (AMPR NET/ROM) rose (AMPR ROSE) arcnet (ARCnet)
dlci (Frame Relay DLCI) fddi (Fiber Distributed Data Interface) hippi (HIPPI)
irda (IrLAP) x25 (generic X.25) infiniband (InfiniBand)
$ /sbin/arping -h
Usage: arping [-fqbDUAV] [-c count] [-w timeout] [-I device] [-s source] destination
-f : quit on first reply
-q : be quiet
-b : keep broadcasting, don't go unicast
-D : duplicate address detection mode
-U : Unsolicited ARP mode, update your neighbours
-A : ARP answer mode, update your neighbours
-V : print version and exit
-c count : how many packets to send
-w timeout : how long to wait for a reply
-I device : which ethernet device to use (eth0)
-s source : source ip address
destination : ask for what ip address
-------------------------------------------------------------------
--- Excel Size Analysis
-------------------------------------------------------------------
Press Alt-F11 and copy the sub below into any VB module.
Position the cursor anywhere in the Sub and press F5.
The results appear in the Immediate Window, which is usually to be found at the bottom of the VBA window.
Type Control-G if it's not visible.
Sub ScanSheets()
'report sheet used range size, number of Query Tables on it and number of pivot tables
'Output goes to the Immediate Window (Ctrl-G); one fixed-width row per worksheet.
Dim ws As Worksheet
'Header row: Name padded to 20, Rows to 8, Cols to 6, then tab-separated counts.
Debug.Print Left("Name" & String$(20, " "), 20) & Left("Rows" & String$(8, " "), 8) & Left("Cols" & String$(6, " "), 6) & "QueryTables" & vbTab & "PivotTables"
Debug.Print String$(60, "-")
For Each ws In Worksheets
'One row per sheet: used-range dimensions plus QueryTable/PivotTable counts.
Debug.Print Left(ws.Name & String$(20, " "), 20) & _
Left(CStr(ws.UsedRange.Rows.Count) & String$(8, " "), 8) & _
Left(CStr(ws.UsedRange.Columns.Count) & String$(6, " "), 6) & _
ws.QueryTables.Count & vbTab & vbTab & vbTab & vbTab & ws.PivotTables.Count
Next 'ws
End Sub
Here are the results for one of the sheets Paolo sent:
Name Rows Cols QueryTables PivotTables
------------------------------------------------------------
2012red 246 53 0 0
bill2012 164 21 0 0
2012 217 49 0 0
CAL act 513 21 0 0
Cal exp. 481 21 0 0
cheat 51 27 0 0
database 147 142 0 0
BM bonds 242 18 0 0
eccomm 230 93 0 0
this week 109 42 0 0
create table 4886 16384 0 0
history2 771 23 0 0
history1 64521 87 0 0
overview 579 61 0 4
ovhist 106 18 0 0
disc 740 37 0 0
Italy 322 28 0 0
Italy 2 214 148 0 0
EFSF 163 16314 0 0
GreecePSI 1048048 44 0 0
auction 282 42 0 0
Port 145 17 0 0
study 56 31 0 0
last week 95 15 0 0
spain2 39 13 0 0
Spain 120 15 0 0
I first looked at the 4 Pivot Tables on sheet "overview". I saw that all these were taking 65k rows from sheet "history1" and expect that this is the problem the user was having.
I then went in and deleted extra rows and columns from "create table", "history1", "EFSF" and "GreecePSI".
This reduced the file footprint from 33MB to 3MB but also had the effect of cutting down the range used by the 4 Pivot Tables on sheet "overview".
I would expect the reduced sheet (attached) to run without problem.
There are no MS Queries or other code to populate "history1", so one thing to do is find out how it is being populated and provide a macro that automatically resizes the range of the PivotTables when the user changes "history1".
Tell me if this is too much information?
-------------------------------------------------------------
-- Per-minute DB I/O statistics for the last hour (Derived from ASH)
-------------------------------------------------------------
select TO_CHAR(TRUNC(ash.sample_time,'MI'),'YYYY-MM-DD HH24:MI') sample_time
, SUM(ash.delta_read_io_requests) read_io_requests
-- FIX: this column previously carried the copy-pasted alias "Phy";
-- it aggregates write I/O requests, so name it accordingly.
, SUM(ash.delta_write_io_requests) write_io_requests
, ROUND(SUM(ash.delta_read_io_bytes)/power(1024,2)) "PhyRead (MB)"
, ROUND(SUM(ash.delta_write_io_bytes)/power(1024,2)) "PhyWrite (MB)"
, ROUND(SUM(ash.delta_interconnect_io_bytes)/power(1024,2)) "Interconnect (MB)"
, SUM(ash.tm_delta_time) tm_time
, SUM(ash.tm_delta_cpu_time) tm_cpu_time
, SUM(ash.tm_delta_db_time) tm_db_time
from gv$active_session_history ash
where ash.sample_time > systimestamp - 1/24   -- last hour only
GROUP BY TRUNC(ash.sample_time,'MI')
ORDER BY TRUNC(ash.sample_time,'MI')
;
-------------------------------------------------------------
-- DB Statistics by User (Derived from SQL Statistics)
-------------------------------------------------------------
-- Aggregates AWR per-SQL deltas by parsing schema across all snapshots
-- of the last day. Uncomment exactly one ORDER BY metric below to rank
-- by a different dimension; the *_delta columns are interval deltas,
-- not cumulative totals.
SELECT ss.parsing_schema_name
, TO_CHAR(MIN(s.end_interval_time),'DD-MON-YYYY HH24:MI') min_snapshot_time
, TO_CHAR(MAX(s.end_interval_time),'DD-MON-YYYY HH24:MI') max_snapshot_time
, sum(ss.executions_delta) executions
, sum(ss.parse_calls_delta) parse_calls
, sum(ss.disk_reads_delta) disk_reads
, sum(ss.buffer_gets_delta) buffer_gets
, sum(ss.rows_processed_delta) rows_processed
, sum(ss.cpu_time_delta) cpu_time
, sum(ss.elapsed_time_delta) elapsed_time
, sum(ss.iowait_delta) iowait_time
, sum(ss.clwait_delta) cluster_wait_time
, sum(ss.ccwait_delta) concurrency_wait_time
, sum(SS.apwait_delta) app_wait_time
, sum(ss.plsexec_time_delta) pls_exec_time
, sum(ss.javexec_time_delta) java_exec_time
, sum(ss.direct_writes_delta) direct_writes
, sum(ss.io_interconnect_bytes_delta) io_interconnect_bytes
, sum(ss.io_offload_elig_bytes_delta) io_offload_elig_bytes
, sum(ss.io_offload_return_bytes_delta) io_offload_return_bytes
, sum(ss.physical_write_bytes_delta) physical_write_bytes
, sum(ss.physical_read_bytes_delta) physical_read_bytes
, sum(ss.optimized_physical_reads_delta) optimized_physical_reads
, sum(ss.cell_uncompressed_bytes_delta) cell_uncompressed_bytes
FROM v$database d
JOIN dba_hist_snapshot s ON s.dbid = d.dbid
JOIN dba_hist_sqlstat ss ON ss.dbid = s.dbid AND ss.instance_number = s.instance_number AND ss.snap_id = s.snap_id
WHERE 1=1
AND s.end_interval_time > systimestamp - 1
GROUP BY ss.parsing_schema_name
-- , TRUNC(s.end_interval_time,'HH24')
ORDER BY
-- physical_read_bytes desc nulls last
-- physical_write_bytes desc nulls last
-- direct_writes desc nulls last
-- disk_reads desc nulls last
-- buffer_gets desc nulls last
-- optimized_physical_reads desc nulls last
-- cell_uncompressed_bytes desc nulls last
-- io_interconnect_bytes desc nulls last
executions desc nulls last
-- parse_calls desc nulls last
-- elapsed_time desc nulls last
-- TRUNC(s.end_interval_time,'HH24')
;
#######################################
# LDAP/DNS commands
########################################
# List LDAP Servers
host -t SRV _ldap._tcp
# Wildcard search on cn, returning only the cn attribute.
ldapsearch -x -h ldapserver -s sub -b "DC=example,DC=com" cn='*namesearch_with_wildcard*' cn
-x - simple authentication
-h - ldaphost
-b - searchbase
-s - search subtree
#######################################
# Find missing entry in known_hosts
########################################
# Scan host keys for server236..server500, merge them (sorted, unique)
# with the current known_hosts, then diff against known_hosts to show
# which hosts are not yet present.
for i in $(seq -f "%03g" 236 500)
do
ssh-keyscan server$i
done \
| sort -u - ~/.ssh/known_hosts | diff ~/.ssh/known_hosts -
#################################
# Add ssh keys between nodes
#################################
# NOTE(review): passwords are hard-coded below ("welcome1", "Bootit1234").
# Scrub before sharing; prefer ssh-copy-id / key-based authentication.
expect -c '
spawn dcli -l root -g all_group -k
while {1} {
expect "(yes/no)?" { send "yes\n"}
expect "password:" { send "welcome1\n"}
}
'
# print leading zero in 2 digit numbers in bash
for i in $(seq -f "%03g" 236 400)
do
ssh-keyscan dtraflocorh$i
done
# Copy the whole .ssh directory to each node (password prompted per host).
for i in $(seq -f "%03g" 236 400)
do
scp -pr .ssh dtraflocorh$i:
done
# BUG(review): ${i} below sits inside a single-quoted expect -c script,
# so the shell never expands it and expect sees the literal ${i}.
# The /tmp/a.ex + argument variants further down are the working approach.
for i in $(seq -f "%03g" 236 400)
do
expect -c '
spawn scp -pr .ssh dtraflocorh${i}:
while {1} {
expect "(yes/no)?" { send "yes\n"}
expect "password:" { send "welcome1\n"}
}
'
done
#!/usr/bin/expect
# NOTE(review): "set $argv[0] ..." is not valid Tcl for naming a variable;
# the working form (used later) is: set HOST [lindex $argv 0]
set $argv[0] [lindex $argv 0]
set timeout 3
expect -c '
spawn scp -pr .ssh dtraflocorh${i}:
while {1} {
expect "(yes/no)?" { send "yes\n"}
expect "password:" { send "welcome1\n"}
}
'
# Driver loop: run the stored expect script once per host.
for i in $(seq -f "%03g" 236 400)
do
expect -f /tmp/a.ex
done
for i in $(seq -f "%03g" 236 400)
do
expect -c '
set HOST [lindex $argv 0]
spawn echo $HOST:
while {1} {
expect "(yes/no)?" { send "yes\n"}
expect "password:" { send "Bootit1234\n"}
}
'
done
# Working variant: pass the hostname as an argument to the expect script.
for i in $(seq -f "%03g" 236 400)
do
/tmp/a.ex dtraflocorh${i}
done
#################################
# Switch all logfile in RAC
#################################
-- Switches the current redo log on every instance in one statement
-- (SWITCH ALL LOGFILE syntax; requires a recent release -- TODO confirm
-- minimum version, older releases need SWITCH LOGFILE per instance).
alter system switch all logfile ;
#################################
# Tnsnames.ora file read
#################################
# Flatten a tnsnames.ora into "ALIAS : HOST : SERVICE" summary lines.
# Assumes alias names start in column 1 and one HOST=/SERVICE_NAME= pair
# per alias -- NOTE(review): multi-address entries will only report the
# last HOST seen; confirm against the actual file.
egrep -i '^[A-Z]|HOST|service' titan_se.tns |while read line
do
# A line starting with a letter begins a new TNS alias.
if [ `echo $line |egrep -i '^[A-Z]' |wc -l` -gt 0 ]; then TNSALIAS=$(echo $line|cut -d= -f1); fi;
if [ `echo $line |egrep -i 'HOST' |wc -l` -gt 0 ]; then HOST=$(echo $line|tr -d ' '| awk -F"HOST=" '{print $2 }'|cut -d')' -f1); fi;
if [ `echo $line |egrep -i 'SERVICE' |wc -l` -gt 0 ];
then
SERVICE=$(echo $line|tr -d ' '| awk -F"SERVICE_NAME=" '{print $2 }'|cut -d')' -f1);
# SERVICE_NAME completes the triple; emit one summary line per alias.
echo $TNSALIAS : $HOST : $SERVICE
fi;
done
-------------------------------
-- Top SQL by CPU
-- Ranks AWR SQL statistics by total CPU minutes consumed across
-- all retained snapshots; uncomment the TRUNC/sql_id filters to narrow.
-------------------------------
SELECT --TRUNC(s.END_INTERVAL_TIME),
       ss.dbid
     , ss.sql_id
     , SUM(ss.executions_delta) "Execs"
     , ROUND((SUM(ss.cpu_time_delta)/1000000 )/60) "CPU Time (minutes)"
     , (SELECT sql_text FROM dba_hist_sqltext sql WHERE sql.dbid = ss.dbid AND sql.sql_id = ss.sql_id) sql_text
FROM dba_hist_sqlstat ss
JOIN dba_hist_snapshot s ON ss.dbid = s.dbid AND ss.instance_number = s.instance_number AND ss.snap_id = s.snap_id
WHERE 1=1
--and ss.sql_id = '8xcfc1knuja0s'
GROUP BY --TRUNC(s.END_INTERVAL_TIME),
         ss.dbid, ss.sql_id
ORDER BY ROUND((SUM(ss.cpu_time_delta)/1000000 )/60) DESC
;
#################################
# Extract data in csv format
#################################
-- SQL*Plus settings for clean comma-separated output:
-- no headings/pagination, no row-count feedback, no trailing blanks.
set pagesize 0
set feedback off
set colsep ","
set trimout on
set trimspool on
set linesize 32767
set echo off
spool outputfile.csv
<<<<<<place_your_query_here>>>>>>
-- FIX: close the spool before restoring settings; without SPOOL OFF the
-- file stays open and the tail of the output may never be flushed.
spool off
set term on
set feedback on
set colsep " "
set pagesize 100
set echo on
#################################
# Java Sleep in milliseconds
#################################
-- FIX: Thread.sleep takes a long -- there is no sleep(int) overload,
-- so the Java method specification must name sleep(long).
CREATE OR REPLACE PROCEDURE sleep(x_millis IN NUMBER) AS LANGUAGE JAVA
NAME 'java.lang.Thread.sleep(long)';
/
###########################
# queue row count
###########################
-- Generates (spooled to vg.sql) a query that reports segment size (MB),
-- row count and retention for every TRAF_% NORMAL_QUEUE table, sorted
-- by size, then runs the result through the html wrapper script.
set lines 10000
set pages 10000
set feedback off
spool vg.sql
select 'select * from ( ' from dual
union all
SELECT 'SELECT ''' || owner || '.' || name || ''' as queue '
|| chr(13)|| ', ' || '(select ROUND(sum(bytes)/power(1024,2)) from dba_segments s where s.owner = ''' || owner || ''' and s.segment_name = ''' || q.queue_table || ''') as "Size(MB)"'
|| chr(13)|| ', ' || '(select count(1) from ' || owner || '.' || queue_table || ') as row_count'
|| chr(13)|| ', ' || retention || ' retention from dual union all'
FROM dba_queues q where q.owner like 'TRAF_%' and q.queue_type ='NORMAL_QUEUE'
union all
-- Dummy terminal row so every generated branch can end in UNION ALL.
select ' select ''abc'' queue,0 "Size(MB)" , 1 rowcount,1 retention from dual' from dual
union all
select ') order by "Size(MB)" desc ; ' from dual
;
spool off
@html @./vg.sql
#################################
# DST Timezone
#################################
set lines 100
-- Timezone file version currently loaded by the instance.
select version from v$timezone_file;
column property_name format a30
column value format a30
-- DST-related database properties (upgrade state / TT versions).
select property_name, substr(property_value, 1, 30) value from database_properties where property_name like 'DST_%' order by property_name;
-- Tables with TIMESTAMP WITH TIME ZONE data still pending DST conversion.
select * from dba_tstz_tables where upgrade_in_progress = 'YES';
#################################
# IO Top (iotop)
#################################
# Poor man's iotop: rank processes by read_bytes/write_bytes from
# /proc/<pid>/io (cumulative since process start, not a rate).
watch -n 2 'grep read_bytes /proc/*/io 2>/dev/null |sort -k2nr |head -15 ; echo ""; grep write_bytes /proc/*/io 2>/dev/null |sort -k2nr |head -15 ;'
watch -n 2 'grep read_bytes /proc/*/io 2>/dev/null |sort -k2nr |head -20'
watch -n 2 'grep write_bytes /proc/*/io 2>/dev/null |sort -k2nr |head -20'
# Variant that also resolves each top PID to its command line.
watch -n 2 'for stat in read_bytes write_bytes
do
for i in `egrep ^$stat /proc/*/io 2>/dev/null |sort -k2nr |head -10 |cut -d/ -f3`;
do
echo PID: $i - $stat: `egrep ^$stat /proc/$i/io` , cmd - `cat /proc/$i/cmdline`;
done
echo ""
done
'
----------------------------------------
-- DBA Registry: installed components and their status
----------------------------------------
SET LINESIZE 100
COLUMN comp_id   FORMAT a20
COLUMN comp_name FORMAT a50
COLUMN status    FORMAT a20
SELECT comp_id
     , comp_name
     , status
FROM dba_registry
ORDER BY comp_id;
----------------------------------------
-- Database Options: which options are enabled in this binary
----------------------------------------
SET LINESIZE 100
COLUMN parameter FORMAT a50
COLUMN value     FORMAT a20
SELECT parameter
     , value
FROM v$option
ORDER BY parameter;
#################################
# SNMP Query
#################################
# Walk the whole MIB tree of a host (SNMP v2c, community "public").
snmpwalk -Os -c "public" -v 2c <IP/host>
# Poll and print deltas of the ipInReceives counter on orabackprd01.
snmpdelta -Cs -CT -c "public" -v 2c orabackprd01 ipInReceives.0
###############################
# RAC Service role check
###############################
# Seed oraenv with a SID so srvctl is on the PATH
# (presumably a placeholder SID for this host -- confirm).
export ORACLE_SID=12102
export ORAENV_ASK=NO
. /usr/local/bin/oraenv
# For every database registered in the cluster, list each configured
# service and its service role as reported by srvctl.
for db in `srvctl config database`
do
export ORACLE_SID=$db
. /usr/local/bin/oraenv 2>/dev/null 1>&2
echo "######################### $db #####################"
for service in `srvctl config service -d $db|grep 'Service name' |cut -d: -f2|awk '{print $1}' |sort`
do
echo $service - `srvctl config service -d $db -s $service |grep 'Service role' |cut -d: -f2|awk '{print $1}'`
done
done