# print leading zero in 2 digit numbers in bash for i in $(seq -f "%02g" 1 15) do echo $i done for (( i=1; i<=17 ; i++ )); do printf '%02d\n' $i; done ---------------------------------------- -- Switch to Pluggable Database (PDB) ---------------------------------------- SQL> ALTER SESSION SET container = pdb1; SQL> ALTER SESSION SET container = cdb$root; SELECT t.name , min(m.begin_time) begin_time , max(m.end_time) end_time , SUM(m.physical_reads) physical_reads , ROUND(SUM(m.physical_block_reads * d.BLOCK_SIZE)/power(1024,2)) physical_reads_mb , SUM(m.physical_writes) physical_writes , ROUND(SUM(m.physical_block_writes * d.BLOCK_SIZE)/power(1024,2)) physical_writes_mb , MAX(m.average_read_time)/10 average_read_time_ms , MAX(m.average_write_time)/10 average_write_time_ms FROM v$filemetric m JOIN v$datafile d ON d.file# = m.file_id JOIN v$tablespace t ON d.TS# = t.ts# GROUP BY t.name ORDER BY physical_reads_mb + physical_writes_mb desc --ORDER BY physical_writes_mb desc --ORDER BY physical_writes desc --ORDER BY physical_reads_mb desc --ORDER BY physical_reads desc ; @session_active % 30 "" @sortusage @sortsegment @top_session_by_cpu % 30 @awr_report_rac @awr_report_rac "2015-09-23 00:00:00" "2015-03-01 00:02:00" html @top_session_by_all % 20 "redo size" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' " ---------------------------------------- -- DBA Registry ---------------------------------------- set lines 100 column comp_id format a20 column comp_name format a50 column status format a20 select comp_id, comp_name, status from dba_registry order by 1; # print leading zero in 2 digit numbers in bash for i in $(seq -f "%02g" 1 15) do echo $i done for (( i=1; i<=17 ; i++ )); do printf '%02d\n' $i; done ---------------------------------------- -- Switch to Pluggable Database (PDB) ---------------------------------------- SQL> ALTER SESSION SET container = pdb1; SQL> ALTER SESSION SET container = cdb$root; SELECT t.name , min(m.begin_time) begin_time , 
max(m.end_time) end_time , SUM(m.physical_reads) physical_reads , ROUND(SUM(m.physical_block_reads * d.BLOCK_SIZE)/power(1024,2)) physical_reads_mb , SUM(m.physical_writes) physical_writes , ROUND(SUM(m.physical_block_writes * d.BLOCK_SIZE)/power(1024,2)) physical_writes_mb , MAX(m.average_read_time)/10 average_read_time_ms , MAX(m.average_write_time)/10 average_write_time_ms FROM v$filemetric m JOIN v$datafile d ON d.file# = m.file_id JOIN v$tablespace t ON d.TS# = t.ts# GROUP BY t.name ORDER BY physical_reads_mb + physical_writes_mb desc --ORDER BY physical_writes_mb desc --ORDER BY physical_writes desc --ORDER BY physical_reads_mb desc --ORDER BY physical_reads desc ; -- ASH Queries --Long running queries from ASH @ash_sid_sqlids % % % 5 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:01:00' and ash.user_id = (select user_id from dba_users where username = upper('kelting')) " @ash_sid_sqlids % % % 5 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:05:00' and ash.service_hash = (select name_hash from dba_services where name = 'trafvaluationsse30')" @ash_sid_sqlids % % % 5 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:20:00' " @ash_sid_sqlids % % % 24 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:00:15' and ash.user_id = 143 " @ash_sid_sqlids % % % 24 % "and trunc(ash.px_flags / 2097152) > 10" @ash_sid_sqlids % % % 1 % " and (ash.sample_time - ash.sql_exec_start) > '+0 00:00:20' and ash.force_matching_signtaure = '8251690068665765049' " -- ASH Queries (AWR) --Long running queries from ASH (AWR) select name_hash from dba_services where name = 'pr01pimi_sp_con'; -- 2950871375 select name_hash from dba_services where name = 'pr01pimi_job_con'; -- 815460948 select name_hash from dba_services where name = 'pr01pimi_ace_con'; -- 3379930795 select name_hash from dba_services where name = 'pr01pimi_sofia_con'; -- 3657673531 select user_id, username from dba_users where username like upper('%username%'); @awr_sid_sqlids % % % 24 % "and 
(ash.sample_time - ash.sql_exec_start) > '+0 00:10:00' and ash.user_id = 6089 " @awr_sid_sqlids % % % 5 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:01:00' and ash.service_hash = 2950871375 " "and ash.sample_time between to_timestamp('31-MAY-16 19:00:00','DD-MON-YY HH24:MI:SS') and to_timestamp('31-MAY-16 20:00:00','DD-MON-YY HH24:MI:SS')" @awr_sid_sqlids % % % 24*2 % "and (ash.sample_time - ash.sql_exec_start) > '+0 00:01:00' and ash.user_id = 3461" "and ash.sample_time between to_timestamp('13-SEP-15 03:13:00','DD-MON-YY HH24:MI:SS') and to_timestamp('12-SEP-15 09:12:00','DD-MON-YY HH24:MI:SS')" -- ASH Temp Usage @ash_sid_sqlids % % % 24 % "" "" " having MAX(NVL(ash.temp_space_allocated,0))/power(1024,3) > 1 " -- Temp usage > 1GB @ash_sid_details % % % 24 "and ash.temp_space_allocated/power(1024,3) > 1 " -- Temp usage > 1GB @ash_sid_sqlids % % % 24 % "and ash.user_id IN (select user_id from dba_users where temporary_tablespace ='TRAF_TITAN_TEMP')" "" " having SUM(ash.temp_space_allocated)/power(1024,3) > 1 " -- Temp usage > 1GB @ash_sid_details % % % 1 " and ash.temp_space_allocated/power(1024,3) > 1 " -- Temp usage > 1GB @awr_sid_sqlids % % % 24*1 % "" "" " having MAX(NVL(ash.temp_space_allocated,0))/power(1024,3) > 10 " -- Temp usage > 1GB @awr_sid_details % % % 1 " and ash.temp_space_allocated/power(1024,3) > 1 " -- Temp usage > 1GB @ash_sid_details % % % 24 "and ash.user_id IN (select user_id from dba_users where temporary_tablespace ='TRAF_TITAN_TEMP')" "and ash.temp_space_allocated/power(1024,1) > 1 " -- Temp usage > 1GB @awr_sid_sqlids % % % 7*24 % "and ash.sample_time between to_timestamp('10-JUN-16 06:00:00','DD-MON-YY HH24:MI:SS') and to_timestamp('10-JUN-16 07:00:00','DD-MON-YY HH24:MI:SS')" "" " having SUM(ash.temp_space_allocated)/power(1024,3) > 1 " -- Temp usage > 1GB -- ASH PGA USage @ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.pga_allocated)/power(1024,3) > 2 " -- PGA usage > 2GB -- ASH Physical IO @ash_sid_sqlids % % % 1 % "" "" " 
having (SUM(ash.delta_read_io_bytes)+SUM(ash.delta_write_io_bytes) ) / power(1024,3) > 20" -- Physical Read/Write > 20GB @ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.delta_read_io_bytes) / power(1024,3) > 20" -- Physical Read > 20GB @ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.delta_write_io_bytes) / power(1024,3) > 20" -- Physical Write > 20GB @ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.delta_interconnect_io_bytes) / power(1024,3) > 20" -- Interconnect IO > 20GB @ash_sid_details % % % 5 "and (ash.delta_read_io_bytes+ash.delta_write_io_bytes)/ power(1024,3) > 1 " -- Physical Read > 20GB @ash_sid_sqlids % % % 1 % "and ash.user_id = (select user_id from dba_users where username = upper('username')) " "" " having SUM(ash.delta_read_io_bytes) / power(1024,3) > 20 " @ash_sid_sqlids % % % 1 % "" "" " having (SUM(ash.delta_read_io_requests)+SUM(ash.delta_write_io_requests) ) > 1000000" -- Physical Read/Write IOPS @ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.delta_read_io_requests) > 1000000" -- Physical Read IOPS @ash_sid_sqlids % % % 1 % "" "" " having SUM(ash.delta_write_io_requests) > 1000000" -- Physical Write IOPS -- Details of session with long running queries from ASH @ash_sid_details % % % 5 "and (ash.sample_time - ash.sql_exec_start) > '+0 00:05:00' and ash.service_hash = (select name_hash from dba_services where name = 'pr01pimi_sp_con') " @top_session_by_all % 20 "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'" @top_session_by_statname % 20 "redo size" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'" @top_session_by_statname % 20 "CPU used by this session" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'" @top_session_by_statname % 20 "consistent gets" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'" @top_session_by_statname % 20 "physical reads" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status 
= 'ACTIVE'" @top_session_by_statname % 20 "physical read total bytes" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'" @top_session_by_statname % 20 "physical read total bytes optimized" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'" @top_session_by_statname % 20 "cell physical IO interconnect bytes" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'" @top_session_by_statname % 20 "cell physical IO bytes saved by storage index" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' " @top_session_by_statname % 20 "physical writes" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'" @top_session_by_statname % 20 "physical write total bytes" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'" @top_session_by_statname % 20 "physical write total bytes optimized" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' AND s.status = 'ACTIVE'" @top_session_by_statname % 20 "session pga memory" "" @top_session_by_statname % 20 "session pga memory max" "" @top_session_by_statname % 20 "bytes sent via SQL*Net to client" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' " @top_session_by_statname % 20 "bytes received via SQL*Net from client" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' " @top_session_by_statname % 20 "execute count" "AND s.type <> 'BACKGROUND' AND s.username <> 'USERNAME' " redo size CPU used by this session consistent gets cell physical IO bytes saved by storage index cell physical IO interconnect bytes cell physical IO interconnect bytes returned by smart scan physical read total bytes physical read total bytes optimized physical write total bytes physical write total bytes optimized bytes received via SQL*Net from client bytes received via SQL*Net from dblink bytes sent via SQL*Net to client bytes sent via SQL*Net to dblink @session_breakdown @logswitchsize 
@awr_loadprofile_physicalreads % 30 @awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "CPU used by this session" @awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "physical read total bytes" @awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "physical write total bytes" @awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "session pga memory" @awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "redo size" @awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "sorts (disk)" @awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "sorts (memory)" @awr_loadprofile_by_statname_delta % "2015-09-23 00:00:00" "2015-03-01 00:02:00" "sorts (rows)" @awr_loadprofile_logicalreads % 7 @awr_loadprofile_physicalreads % 7 @awr_loadprofile_physicalwrites % 7 @awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "CPU used by this session" @awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "physical read total bytes" @awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "physical write total bytes" @awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "session pga memory" @awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "redo size" @awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "sorts (disk)" @awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "sorts (memory)" @awr_loadprofile_by_statname_hourlybreakdown_delta % "2015-09-23 00:00:00" "2015-03-21 01:05:00" "sorts (rows)" -- Last 2 hours @awr_top_sql_by_all 2 % "" "" 10 -- Last 10 hours @awr_top_sql_by_all 10 % "" "" 10 -- 
Between a specific timeframe @awr_top_sql_by_all % % "2015-09-28 00:00:00" "2015-10-05 00:05:00" 10 @awr_top_sql_by_all % % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 @awr_top_sql_by_weight % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 @awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 @awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.sql_id not in ('d7y4tdacc7f3j','5ysyjtdsjwsr2','7akf6xvjp5a6d','3s2u15uzjum48','5ysyjtdsjwsr2','4sp1tg6fv1a9p','bcv9qynmu1nv9','cm5vu20fhtnq1','7wv97t3szuvtd','ar9nmtmd28460','4m7m0t6fjcs5x,'2skwhauh2cwky','12kw3xcxv1qpg','9tgj4g8y4rwy8')" @awr_top_sql_weekly_client @awr_top_sql_weekly_client % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 @awr_top_sql_weekly_client % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.parsing_schema_name like 'username'" @awr_top_sql_weekly_client % % "2015-01-27 14:00:00" "2015-01-27 19:02:00" 10 "and ss.parsing_schema_name like 'username'" @awr_sqlstats_weekly 1xnhn6v30q1t0 @awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.sql_id in ('47604xybfj03j')" @awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.parsing_schema_name = 'username'" @awr_top_sql_by_all % % "2015-06-01 00:00:00" "2015-06-04 00:02:00" 10 "and ss.parsing_schema_name like 'username'" @awr_top_sql_by_weight % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.parsing_schema_name like 'username'" @awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.parsing_schema_name like 'FES%'" @awr_top_sql_by_weight2 % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 30 "and ss.parsing_schema_name like 'AE%'" @awr_top_sql_by_all % % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "and ss.parsing_schema_name like 'AE_%'" @awr_top_sql_by_cpu % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 @awr_top_sql_by_exec % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 @awr_top_sql_by_elapsed_time % "2015-09-23 
00:00:00" "2015-03-01 00:02:00" 10 @awr_top_sql_by_physicalread % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 @awr_top_sql_by_physicalwrite % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 @awr_top_sql_by_buffer_gets % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 @awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "redo size" "&&WHERECLAUSE" @awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "elapsed_time_total" "&&WHERECLAUSE" @awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "executions_total" "&&WHERECLAUSE" @awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "physical_change_bytes_total" "&&WHERECLAUSE" @awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "physical_write_bytes_total" "&&WHERECLAUSE" @awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "physical_read_bytes_total" "&&WHERECLAUSE" @awr_top_sql % "2015-09-23 00:00:00" "2015-03-01 00:02:00" 10 "buffer_gets_total" "&&WHERECLAUSE" @awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "db_block_changes" "&&WHERECLAUSE" @awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "physical_reads" "&&WHERECLAUSE" @awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "physical_writes" "&&WHERECLAUSE" @awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "physical_reads_direct" "&&WHERECLAUSE" @awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "physical_writes_direct" "&&WHERECLAUSE" @awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "logical_reads" "&&WHERECLAUSE" @awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "space_used" "&&WHERECLAUSE" @awr_top_segment % "2014-01-14 01:00:00" "2014-01-14 06:35:00" 10 "space_allocated" "&&WHERECLAUSE" AWR Top Segment Statistics ################################## db_block_changes physical_reads physical_writes physical_reads_direct physical_writes_direct logical_reads space_used space_allocated table_scans itl_waits 
row_locks_waits chain_row_excess buffer_busy_waits gc_buffer_busy gc_cr_blocks_served gc_cu_blocks_served gc_cr_blocks_received gc_cu_blocks_received PGA Usage Vs Temp Usage Statistics ##################################### 'workarea memory allocated' - the total amount of PGA memory dedicated to workareas allocated in Kb. 'workarea executions - optimal' - the cumulative count of work areas which had an optimal size. For example optimal size is defined if the sort does not need to spill to the disk 'workarea executions - onepass' - the cumulative count of work areas using the one pass size. One pass is generally used for big work areas where spilling to disk cannot be avoided. 'workarea executions - multipass' - the cumulative count of work areas running in more than one pass. This should be avoided and is the symptom of poorly tuned system. @statname "%physical%" @awr_statistics_delta % "2015-09-23 00:00:00" "2015-09-29 00:00:00" "workarea memory allocated" @awr_statistics_delta % "2015-09-23 00:00:00" "2015-09-29 00:00:00" "workarea executions - optimal" -- In-memory (PGA) execution @awr_statistics_delta % "2015-09-23 00:00:00" "2015-09-29 00:00:00" "workarea executions - onepass" -- Execution spilling to tempspace @awr_statistics_delta % "2015-09-23 00:00:00" "2015-09-29 00:00:00" "workarea executions - multipass" #Row Chaining @awr_statistics_delta % "2015-09-28 00:00:00" "2015-09-29 00:00:00" "table fetch continued row" @awr_statistics_delta % "2015-09-28 00:00:00" "2015-09-29 00:00:00" "table fetch by rowid" @awr_pga_stats_delta_by_statname % "2015-09-23 00:00:00" "2015-09-23 00:00:00" "total PGA allocated" @pga_stats -- This will give you the PGA stat name ##################################################### aggregate PGA target parameter aggregate PGA auto target cache hit percentage total PGA allocated total PGA inuse total freeable PGA memory total PGA used for auto workareas total PGA used for manual workareas max processes count process count maximum 
PGA allocated maximum PGA used for auto workareas maximum PGA used for manual workareas over allocation count bytes processed extra bytes read/written PGA memory freed back to OS recompute count (total) global memory bound @awr_service_stats %grid% "logons cumulative" "2013-04-04 12:00:00" "2015-03-21 01:05:00" System Events ###################### @eventname "%log%switch%" @awr_system_event_time "log file switch (archiving needed)" 3 @awr_system_event_time "log file switch (archiving needed)" 3 @awr_system_event_time "log file switch (checkpoint incomplete)" 3 @awr_system_event_time "log file switch completion" 3 @awr_system_event_time "user I/O wait time" 3 set tab off set lines 190 pages 0 arraysize 63 select * from table(dbatavaria.jss.gtop(50)); ######################## # RAC Commands ###################### # Check CRS Stack internal resources status. dcli -l oracle -g ~/dbs_group 'crsctl stat res -t -init -w "((NAME != ora.drivers.acfs) AND (NAME != ora.diskmon))"' |grep -v 'ONLINE ONLINE' |egrep -B1 'INTERMEDIATE|OFFLINE' |egrep -v '^--' for host in `olsnodes` do echo "###### $host #######" ssh $host 'crsctl stat res -t -init -w "((NAME != ora.drivers.acfs) AND (NAME != ora.diskmon))" |grep -v "ONLINE ONLINE" |egrep -B1 "INTERMEDIATE|OFFLINE" ' \ 2>/dev/null |egrep -v '^--' done # Check CRS Resource dependency crsctl status res -p -init |egrep '^NAME=|DEPENDE' | grep -B1 ora.gpnpd crsctl status res -p |egrep '^NAME=|^STOP_DEPENDENCIES' | grep -B1 ora.oc4j crsctl status res -p |egrep '^NAME=|^START_DEPENDENCIES' | grep -B1 ora.oc4j crsctl status res -p -init |egrep '^NAME=|DEPENDENCIES' | while read line do done ######################## # Search Alert on Exadata ###################### less $ORACLE_BASE/diag/rdbms/`echo $ORACLE_SID|cut -b 1-8`/$ORACLE_SID/trace/alert_$ORACLE_SID.log dcli -l oracle -g /opt/oracle.SupportTools/onecommand/dbs_group 'egrep -A10 "Aug 01.*2012" $ORACLE_BASE/diag/rdbms/`echo $ORACLE_SID|cut -b 
1-8`/$ORACLE_SID/trace/alert_$ORACLE_SID.log |egrep "2012|ORA-00600|trc" | grep -B1 ORA- |egrep -v "^--" ' dcli -l oracle -g ~/dbs_group_pr01pimi 'egrep -A10 "Mar 31.*2014" $ORACLE_BASE/diag/rdbms/`echo $ORACLE_SID|cut -b 1-8`/$ORACLE_SID/trace/alert_$ORACLE_SID.log |egrep -v "opiodr|ORA-3136" | egrep "2014|ORA-|trc" | egrep -B1 "ORA-|trc" |egrep -v "^--" ' dcli -l oracle -g ~/dbs_group_bt01pimi 'egrep -A10 "Mar 31.*2014" $ORACLE_BASE/diag/rdbms/`echo $ORACLE_SID|cut -b 1-8`/$ORACLE_SID/trace/alert_$ORACLE_SID.log |egrep -v "opiodr|ORA-3136" | egrep "2014|ORA-|trc" | egrep -B1 "ORA-|trc" |egrep -v "^--" ' dcli -l oracle -c dm02db04,dm02db05,dm02db06 'egrep -A10 "Oct 16.*2013" $ORACLE_BASE/diag/rdbms/`echo $ORACLE_SID|cut -b 1-8`/$ORACLE_SID/trace/alert_$ORACLE_SID.log |egrep -v opiodr | egrep "2013|ORA-|trc" | egrep -B1 "ORA-|trc" |egrep -v "^--" ' dcli -l oracle -g /opt/oracle.SupportTools/onecommand/dbs_group 'find /u01/app/oracle/diag/rdbms/pr01pimi/ -name '*trc' -exec grep -l deadlock {} \; ' ############################## # Search deadlock on Exadata ############################### dcli -l oracle -g ~/dbs_group "find /u01/app/oracle/diag/rdbms/pr01pimi -name '*trc' -mtime -1 -exec grep -l deadlock {} \; " dcli -l oracle -g ~/dbs_group "find /u01/app/oracle/diag/rdbms/pr01pimi -name '*trc' -mtime -1 -exec grep -l deadlock {} \; " ##################################### # Search Listener Log on Exadata ##################################### -- Verify SCAN listener start times dcli -l oracle -g ~/dbs_group 'ps -ef|grep tns|grep SCAN|grep -v grep ' cat ~/dbs_group_pr01pimi cat ~/dbs_group_bt01pimi --Scan Listeners less $ORACLE_BASE/diag/tnslsnr/\`hostname -s\`/listener_scan*/trace/listener_scan*.log dcli -l oracle -g /opt/oracle.SupportTools/onecommand/dbs_group "grep 07-AUG-2012 $ORACLE_BASE/diag/tnslsnr/\`hostname -s\`/listener_scan*/trace/listener_scan*.log |egrep 'vamiot' " DMZ - /u01/oracle/11.2.0.3/grid/log for HOST in `olsnodes` do ssh $HOST "egrep 'MAY-2013' 
/u01/app/oracle/diag/tnslsnr/${HOST}/listener/trace/listener.log \ | egrep -v 'service_update' \ 2> /dev/null " 2> /dev/null done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' for HOST in `cat ~/dbs_group_bt01pimi` do ssh $HOST "egrep 'JUL-2014' /u01/app/oracle/diag/tnslsnr/${HOST}/listener/trace/listener.log \ | egrep 'Optimizer' |egrep mpilyavs \ 2> /dev/null " 2> /dev/null done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' srvctl status scan_listener |grep running|awk '{print $3 "," $8}' |while read line do HOST=`echo $line|cut -d, -f2` LISTENER=`echo $line|cut -d, -f1 | tr [:upper:] [:lower:]` ssh $HOST "grep '01-OCT-2013' /u01/app/11.2.0.2/grid/log/diag/tnslsnr/${HOST}/${LISTENER}/trace/${LISTENER}.log \ |egrep -v 'service_update' |grep sas " done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' srvctl status scan_listener |grep running|awk '{print $3 "," $8}'|while read line do HOST=`echo $line|cut -d, -f2` LISTENER=`echo $line|cut -d, -f1 | tr [:upper:] [:lower:]` ssh $HOST "egrep '09-MAY-2013' /u01/app/11.2.0.2/grid/log/diag/tnslsnr/${HOST}/${LISTENER}/trace/${LISTENER}.log \ |egrep -v 'service_update' |grep dabhi " done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' | awk -F"USER=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c | sort -k1nr #### Symphony Grid Connections srvctl status scan_listener |grep running|awk '{print $3 "," $8}'|while read line do HOST=`echo $line|cut -d, -f2` LISTENER=`echo $line|cut -d, -f1 | tr [:upper:] [:lower:]` ssh $HOST "egrep \"`date --date=yesterday +%d-%b-%Y|tr [:lower:] [:upper:]`|`date +%d-%b-%Y|tr [:lower:] [:upper:]`\" /u01/app/11.2.0.2/grid/log/diag/tnslsnr/${HOST}/${LISTENER}/trace/${LISTENER}.log \ |grep sas " done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' | cut -d: -f1-3 |uniq -c srvctl status scan_listener |grep running|awk '{print $3 "," $8}'|while read line do HOST=`echo $line|cut -d, 
-f2` LISTENER=`echo $line|cut -d, -f1 | tr [:upper:] [:lower:]` ssh $HOST "egrep -h '31-JUL-2013' /u01/app/11.2.0.2/grid/log/diag/tnslsnr/${HOST}/listener_scan*/trace/listener_scan*.log \ |grep sas | grep devmtg| grep python " done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' | cut -d: -f1-2 |uniq -c for HOST in `olsnodes` do ssh $HOST "egrep -h '31-JUL-2013' /u01/app/11.2.0.2/grid/log/diag/tnslsnr/${HOST}/listener_scan*/trace/listener_scan*.log \ | egrep -v 'service_update' |egrep -i 'sas' \ 2> /dev/null " 2> /dev/null done | sort -k1,2 | awk '{print $1 " " $2 " " $4 " " $10 " " $11 " " $12}' | cut -d: -f1-3 |uniq -c 24-OCT-2013 01:39:15 * (CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=servername)(CID=(PROGRAM=sqlplus)(HOST=hostname)(USERUSER=username))) * (ADDRESS=(PROTOCOL=tcp)(HOST=144.77.86.141)(PORT=60263)) * establish * servicename * 0 # Filter for String | awk -F"USER=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c |sort -k1nr | awk -F"SERVICE_NAME=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c |sort -k1nr | awk -F"PROGRAM=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c |sort -k1nr | awk -F"HOST=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c |sort -k1nr | awk ' { date=$1; time=$2; program=""; service_name=""; user=""; host=""; n=split($0 , array1, "(" ); for (i=1; i<=n; i++) { split(array1[i] ,array2 , ")" ) split(array2[1] ,array3 , "=") field=array3[1] value=array3[2] if (field == "PROGRAM") { program=value} if (field == "SERVICE_NAME") { service_name=value} if (field == "USER") { user=value} if (field == "HOST") { if (host=="" || host=="__jdbc__") { host=value} } } print date " " time " * " service_name " * " host " * " user " * " program " * " ; } ' # Add following for logins/day |awk '{print $1}' |uniq -c # Add following for logins/hour |cut -d: -f1 |uniq -c # Add following for logins/10min |cut -c1-16 |uniq -c # Add following for logins/min |cut -d: -f1-2 |uniq -c # Add following for logins/sec |cut 
-d" " -f1-2 |uniq -c # Add following for logins/service_name | awk -F"SERVICE_NAME=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c | sort -k1nr # Add following for logins/program | awk -F"PROGRAM=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c | sort -k1nr # Add following for logins/clienthost | awk -F"HOST=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c | sort -k1nr # Add following for logins/user | awk -F"USER=" '{print $2}' |awk -F")" '{print $1}' |sort |uniq -c | sort -k1nr -- Host Listeners less $ORACLE_BASE/diag/tnslsnr/`hostname -s`/listener/trace/listener.log dcli -l oracle -g /opt/oracle.SupportTools/onecommand/dbs_group "grep 1*-AUG-2012 $ORACLE_BASE/diag/tnslsnr/\`hostname -s\`/listener/trace/listener.log |egrep 'trs20'" ##################################### # Exadata verify tnsnames.ora ##################################### firstnode=`head -1 dbs_group` dcli -l oracle -g dbs_group "scp $ORACLE_HOME/network/admin/tnsnames.ora $firstnode:/tmp/tnsnames.ora_\`hostname -s\`" cat dbs_group |grep -v $firstnode |while read i do echo "######## diff /tmp/tnsnames.ora_$firstnode /tmp/tnsnames.ora_$i #############" diff /tmp/tnsnames.ora_$firstnode /tmp/tnsnames.ora_$i done ##### Sync the tnsnames.ora firstnode=`head -1 dbs_group` cp -p /tmp/tnsnames.ora_$firstnode $ORACLE_HOME/network/admin/tnsnames.ora dcli -l oracle -g dbs_group -d $ORACLE_HOME/network/admin/ -f $ORACLE_HOME/network/admin/tnsnames.ora ##################################### # Kill Sessions ##################################### BEGIN FOR i in ( select 'alter system kill session ''' || sid || ',' || serial# || ',@' || inst_id || ''' immediate' sqltext from gv$session s where 1=1 AND s.status <> 'KILLED' AND s.program like '%%' -- AND s.username like '%%' AND s.osuser like '@%' -- AND s.sql_id = '0ggabydqkkgrf' -- AND s.service_name = '%%' ORDER BY s.logon_time desc ) LOOP begin execute immediate i.sqltext; exception WHEN OTHERS THEN null; END; END LOOP; END; / select 'alter system 
kill session ''' || sid || ',' || serial# || ',@' || inst_id || ''';' , s.program from gv$session s where s.program like '%proteusNightCycle%'; ############################# GoldenGate (Run in BASH shell) ############################# help stats extract help stats replicat # Tables ordered by operations #echo 'STATS PPRD041 , HOURLY , REPORTRATE SEC' |./ggsci |egrep 'Replicating|Extracting|Total operations/second' | while read line ; #echo 'STATS PPRD041 , HOURLY , REPORTRATE MIN' |./ggsci |egrep 'Replicating|Extracting|Total operations/minute' | while read line ; #echo 'STATS PPRD041 , HOURLY , REPORTRATE HR' |./ggsci |egrep 'Replicating|Extracting|Total operations/hour' | while read line ; #echo 'STATS PPRD041 , DAILY , REPORTRATE SEC' |./ggsci |egrep 'Replicating|Extracting|Total operations/second' | while read line ; #echo 'STATS PPRD041 , DAILY , REPORTRATE MIN' |./ggsci |egrep 'Replicating|Extracting|Total operations/minute' | while read line ; #echo 'STATS PPRD041 , DAILY , REPORTRATE HR' |./ggsci |egrep 'Replicating|Extracting|Total operations/hour' | while read line ; # echo 'STATS PPRD041 , DAILY ' |./ggsci |egrep 'Replicating|Extracting|Total operations' | while read line ; #echo 'STATS PPRD041 , LATEST ' |./ggsci |egrep 'Replicating|Extracting|Total operations' | while read line ; #echo 'STATS PPRD041 , HOURLY ' |./ggsci |egrep 'Replicating|Extracting|Total operations' | while read line ; do if [[ "$line" == *Extracting* ]] || [[ "$line" == *Replicating* ]] then printf "\n`echo $line|awk '{print $5}'`" ; elif [[ "$line" == *since* ]] then printf " $line" ; else [[ "$line" == *operations* ]] printf "\t\t `echo $line|awk '{print $3}'`" ; fi done |sort -k2nr 2>/dev/null |grep -v '^$' |head -20 ; echo ""; INFO PPRD041 detail INFO PPRD041 SHOWCH send PPRD041, showtrans count 5 TABULAR STATS PPRD041 , LATEST REPORTRATE SEC STATS PPRD041 , LATEST REPORTRATE MIN STATS PPRD041 , LATEST REPORTRATE HR STATS PPRD041 , LATEST STATS PPRD041 , TOTAL , DAILY, HOURLY, 
LATEST, REPORTRATE HR STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST, TABLE table STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST, TABLE dbo.trade_block , REPORTRATE HR STATS PPRD041 , TOTALSONLY * STATS PPRD041 , LATEST * STATS PPRD041 , TOTALSONLY dbo.pos_pr_daily_temp STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST, TABLE dbdbo.swap_trade STATS PPRD041 , TABLE dbo.swap_trade, TOTALSONLY * STATS PPRD041 , TOTAL , TABLE dbo.pos_pr_daily_temp STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST , TABLE dbo.pos_pr_daily_temp REPORTRATE HR STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST , TABLE dbo.pos_pr_daily_temp REPORTRATE MIN STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST , TABLE dbo.pos_pr_daily_temp REPORTRATE SEC STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST, TOTALSONLY FUNDSTATION_OWN.* , NOREPORTDETAIL , REPORTRATE HR STATS PPRD041 , TOTAL , DAILY, HOURLY, LATEST , TABLE FUNDSTATION_OWN.RATE , REPORTRATE HR STATS PPRD041 table dbo.af_detail_trans STATS PPRD041 table dbo.af_detail_trans_split STATS PPRD041 , TOTALSONLY * dbo.acct_position dbo.acct_position_by_sector View Trail file contents ################################ #Count the records every n minutes interval. 
logdump < detail EOF logdump < position 22974377 position forward next or position reverse next #### Go reverse from an RBA to end of transaction and then forward position 8355707 detail off position reverse SCANFORENDTRANS position forward next info REP_AM05 stop REP_AM05 info REP_AM05 alter REP_AM05 extseqno 359786 extrba 0 start REP_AM05 info REP_AM05 connect target sys/*****@TNS connect AUXILIARY / run { CONFIGURE DEVICE TYPE DISK PARALLELISM 24; SET NEWNAME FOR DATABASE to '+RECO_DM02_EXT'; DUPLICATE TARGET DATABASE TO BT01PIMI until SCN 503299730741 OPEN RESTRICTED ; } connect target sys/*****@TNS connect AUXILIARY / run { SET NEWNAME FOR DATABASE TO '+DATA_DM51'; DUPLICATE TARGET DATABASE FOR STANDBY FROM ACTIVE DATABASE DORECOVER SPFILE SET "db_unique_name"="db_unique_name" SET FAL_SERVER="akslfks" SET LOG_ARCHIVE_DEST_2="SERVICE=pr01hsti ASYNC VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) compression=enable" SET DB_FILE_NAME_CONVERT=("+DATA/","+RECO_DM51/","+RECO/","+RECO_DM51/") SET LOG_FILE_NAME_CONVERT=("+DATA/","+RECO_DM51/","+RECO/","+RECO_DM51/") set standby_file_management='AUTO' ; } create spfile='+DATA_DM51/dbname/spfiledbname.ora' from pfile='/u01/app/oracle/product/11.2.0.2/dbhome_1/dbs/initdbname.ora'; allocate channel for maintenance device type disk; delete noprompt archivelog all completed before 'sysdate - 1' ; --************************************************** -- Auditing Queries --************************************************** SELECT * FROM gv$system_parameter where name like '%audit%' order by name, inst_id; SELECT * FROM DBA_STMT_AUDIT_OPTS; SELECT * FROM DBA_PRIV_AUDIT_OPTS; SELECT * FROM DBA_OBJ_AUDIT_OPTS; SELECT * FROM STMT_AUDIT_OPTION_MAP; SELECT * FROM ALL_DEF_AUDIT_OPTS; SELECT * FROM DBA_AUDIT_EXISTS; SELECT * FROM DBA_FGA_AUDIT_TRAIL; SELECT * FROM DBA_COMMON_AUDIT_TRAIL; SELECT * FROM DBA_AUDIT_STATEMENT; SELECT * FROM DBA_AUDIT_SESSION; SELECT * FROM DBA_AUDIT_OBJECT; SELECT * FROM DBA_AUDIT_POLICIES; SELECT * FROM 
DBA_AUDIT_POLICY_COLUMNS; AUDIT SESSION WHENEVER NOT SUCCESSFUL; AUDIT SESSION BY ; NOAUDIT SESSION; NOAUDIT SESSION BY ; AUDIT DELETE ON BY SESSION WHENEVER SUCCESSFUL ; AUDIT DELETE ON BY ACCESS WHENEVER SUCCESSFUL ; AUDIT DELETE ON BY SESSION WHENEVER NOT SUCCESSFUL ; AUDIT DELETE ON BY ACCESS WHENEVER NOT SUCCESSFUL ; AUDIT DELETE ON BY SESSION ; -- regardless of success or failure AUDIT DELETE ON BY ACCESS ; -- regardless of success or failure ####################################### ##### Dropping temp segments ####################################### SQL> select ts#, name from sys.ts$ where name like 'TEMP%'; alter session set events 'immediate trace name drop_segments level '; alter session set events 'immediate trace name drop_segments level 5'; alter session set events 'immediate trace name drop_segments level 101'; alter session set events 'immediate trace name drop_segments level 115'; alter session set events 'immediate trace name drop_segments level 99'; alter session set events 'immediate trace name drop_segments level 118'; alter session set events 'immediate trace name drop_segments level 100'; ####################################### ##### OLAP Cubes ####################################### ##### Workaround to fix the OLAP repoistory issue with a particular CUBE ##### (with suitable replacement of my_aw). begin dbms_aw.execute(q'! aw attach my_aw RW _repos clear update commit aw detach my_aw aw attach my_aw RW allcompile update commit aw detach my_aw !'); end; / -- Clear and re-calculate the Cube Aggregations BEGIN DBMS_CUBE.BUILD(q'! CRM_DW_DMZ_OWN.TRANSACTION_CUBE USING ( FOR CRM_DW_DMZ_OWN.ALLOCATION_DATE_DIM WHERE CRM_DW_DMZ_OWN.ALLOCATION_DATE_DIM.DIM_KEY IS DESCENDANT OR SELF OF 'CM Apr 2013' WITHIN CRM_DW_DMZ_OWN.ALLOCATION_DATE_DIM.CALENDAR_HIERARCHY BUILD(CLEAR AGGREGATES, SOLVE) ) !' , add_dimensions=>false ); END; / ##### Cube build Logging. 
##### -- Create Logging Tables BEGIN SYS.DBMS_CUBE_LOG.TABLE_CREATE(log_type => sys.dbms_cube_log.type_build , tblname => dbms_cube_log.default_name(dbms_cube_log.type_build) ); SYS.DBMS_CUBE_LOG.TABLE_CREATE(log_type => sys.dbms_cube_log.type_dimension_compile, tblname => dbms_cube_log.default_name(dbms_cube_log.type_operations) ); SYS.DBMS_CUBE_LOG.TABLE_CREATE(log_type => sys.dbms_cube_log.type_operations , tblname => dbms_cube_log.default_name(dbms_cube_log.type_dimension_compile) ); SYS.DBMS_CUBE_LOG.TABLE_CREATE(log_type => sys.dbms_cube_log.type_rejected_records , tblname => dbms_cube_log.default_name(dbms_cube_log.type_rejected_records) ); END; / -- Query Default logging table names SELECT dbms_cube_log.default_name(dbms_cube_log.type_build) "Build" , dbms_cube_log.default_name(dbms_cube_log.type_operations) "Operations" , dbms_cube_log.default_name(dbms_cube_log.type_dimension_compile) "DimensionCompile" , dbms_cube_log.default_name(dbms_cube_log.type_rejected_records) "RejectedRecords" FROM dual; -- Enable CUBE Build Logging (*** in current session ***) /* When you pass first parameter (log_type) as NULL it set for dimension_compile, rejected_records,operations log_types. */ /* So if you passed log_type = NULL and log_location = CUBE_BUILD_LOG, it will try to log non-build records mentioned above as to this table. 
*/ BEGIN dbms_cube_log.enable(DBMS_CUBE_LOG.TYPE_OPERATIONS ,DBMS_CUBE_LOG.TARGET_TABLE,DBMS_CUBE_LOG.LEVEL_MEDIUM); dbms_cube_log.enable(DBMS_CUBE_LOG.TYPE_REJECTED_RECORDS ,DBMS_CUBE_LOG.TARGET_TABLE,DBMS_CUBE_LOG.LEVEL_MEDIUM); dbms_cube_log.enable(DBMS_CUBE_LOG.TYPE_DIMENSION_COMPILE ,DBMS_CUBE_LOG.TARGET_TABLE,DBMS_CUBE_LOG.LEVEL_MEDIUM); dbms_cube_log.set_parameter(DBMS_CUBE_LOG.TYPE_REJECTED_RECORDS ,1,100); -- Set Max Errors to log dbms_cube_log.set_parameter(DBMS_CUBE_LOG.TYPE_DIMENSION_COMPILE ,1,100); -- Set Max Errors to log END ; / -- Cube build BEGIN DBMS_CUBE.BUILD( method => 'CCCCCCCCCCCC' , refresh_after_errors => false , parallelism => 8 , atomic_refresh => false , automatic_order => false , add_dimensions => false , scheduler_job => 'OLAP$_' || CRM_DW_DMZ_OWN.CUBE_BUILD_ID.nextval , master_build_id => CRM_DW_DMZ_OWN.CUBE_BUILD_ID.nextval , nested => false , script => ' CRM_DW_DMZ_OWN.CONTACT_PARTNERSHIPS_DIM USING ( LOAD NO SYNCH, COMPILE SORT ), CRM_DW_DMZ_OWN.TRANSACTION_CUBE' ); END; / -- Check Progress ALTER SESSION SET CURRENT_SCHEMA = CRM_DW_DMZ_OWN; SELECT * FROM dba_sequences WHERE sequence_owner = SYS_CONTEXT('USERENV','CURRENT_SCHEMA'); SELECT * FROM dba_scheduler_running_jobs; SELECT * FROM CRM_DW_DMZ_OWN.cube_build_log; SELECT * FROM CRM_DW_DMZ_OWN.cube_dimension_compile ; SELECT * FROM CRM_DW_DMZ_OWN.cube_operations_log; SELECT * FROM CRM_DW_DMZ_OWN.cube_rejected_records; SELECT TIME, build_id, slave_number, command, status, build_object, build_object_Type ,partition, output FROM cube_build_log where build_id = 46 order by time desc; SELECT build_id , slave_number , build_object , build_object_type , partition , command , NVL(MAX(DECODE(status , 'COMPLETED',NVL(TIME,SYSDATE), NULL)),systimestamp) - MIN(DECODE(status , 'STARTED',TIME, NULL)) duration , MIN(DECODE(status , 'STARTED',TIME, NULL)) started , MAX(DECODE(status , 'COMPLETED',TIME, NULL)) completed FROM cube_build_log WHERE build_id = 46 GROUP BY build_id , slave_number , 
build_object , build_object_type , partition , command ORDer by started desc ; SELECT build_id , slave_number , build_object , build_object_type , partition , command , NVL(MAX(DECODE(status , 'COMPLETED',NVL(TIME,SYSDATE), NULL)),systimestamp) - MIN(DECODE(status , 'STARTED',TIME, NULL)) duration , MIN(DECODE(status , 'STARTED',TIME, NULL)) started , MAX(DECODE(status , 'COMPLETED',TIME, NULL)) completed FROM cube_build_log WHERE build_id = 46 AND command = 'BUILD' GROUP BY build_id , slave_number , build_object , build_object_type , PARTITION , command HAVING MAX(DECODE(status , 'COMPLETED',TIME, NULL)) IS NOT NULL AND partition IS NOT NULL ORDer by started desc ; SELECT a.build_id, a.slave_number, s.status, a.command,a.time, a.partition, a.build_object, a.build_object_type , s.event, s.status FROM cube_build_log a JOIN DBA_SCHEDULER_RUNNING_JOBS j ON j.owner = a.owner AND a.scheduler_job = j.job_name JOIN gv$session s ON s.inst_id = j.running_instance AND j.session_id = s.SID WHERE 1=1 AND A.TIME = (SELECT MAX(TIME) FROM cube_build_log c WHERE C.build_id = A.build_id AND A.PARTITION = c.PARTITION GROUP BY c.build_id, c.PARTITION) AND NOT EXISTS (SELECT 1 FROM cube_build_log b WHERE b.build_id = A.build_id AND A.PARTITION = b.PARTITION AND A.command = b.command AND b.status = 'COMPLETED') AND A.build_id = 46 ORDER BY a.TIME DESC ; ####################################### ##### Analytic Workspace (AW , AWS) ####################################### select * from dba_aws where owner = 'CRM_DW_OWN'; select * from dba_aw_obj where owner = 'CRM_DW_OWN' and aw_name = 'MSCRM_DMART'; select * from dba_aw_prop where owner = 'CRM_DW_OWN'; select * from v$aw_session_info; select * from v$aw_olap; select * from v$aw_longops; select * from v$aw_aggregate_op; select * from v$aw_allocate_op; oradebug setmypid oradebug tracefile_name oradebug close_trace oradebug ulimit MOS Note ID [742375.1] - How to Recreate an AW Through Export and Import Commands dbms_aw.execute Syntax EXPORT 
export_item TO EIF FILE file-name [LIST] [NOPROP] - [NOREWRITE|REWRITE] [FILESIZE n [K, M, or G]] - [NOTEMPDATA] [NLS_CHARSET charset-exp] [AGGREGATE | NOAGGR] - [API | NOAPI] where AGGREGATE - Export aggregated data. (Default behavior.) NOAGGR - Do not export aggregated data. -- Exporting AW Metadata and Data to an EIF file -- The keyword 'API' makes sure that all metadata is exported with the EIF. -- Such an EIF file can be imported again with the same option. -- The keyword 'NOAPI' prevents the export of the metadata and also the creation of such metadata during import. -- NOAPI also the default setting, so make sure you make the right choice during the export. Also note that there is no way -- to re-create the metadata for an EIF file exported with the 'NOAPI' setting. exec dbms_aw.execute('export all to eif file ''DIRECTORY_OBJECT/export.eif'' api'); -- Importing AW Metadata and Data to an EIF file exec dbms_aw.execute('aw create AWNAME'); exec dbms_aw.execute('aw attach AWNAME rw'); exec dbms_aw.execute('import all from eif file ''my_dir/export_file_name.eif'' '); exec dbms_aw.execute('update'); commit; exec dbms_aw.execute('aw detach AWNAME'); -- Exporting AW Metadata to an XML file exec dbms_aw.execute('aw attach . 
rw'); exec dbms_cube.export_xml_to_file( object_ids => '..AW', output_dirname => 'DATA_DUMP_DIR2', output_filename => 'AW_Metadata.xml'); exec dbms_aw.execute('aw detach mscrm_dmart'); -- Exporting AW Metadata to an XML file (Advanced) set serveroutput on DECLARE optionsClob CLOB; fh utl_file.file_type; buffer VARCHAR2(4000); amount INTEGER := 4000; BEGIN dbms_lob.createtemporary(optionsClob, false, dbms_lob.call); dbms_cube.create_export_options(out_options_xml => optionsClob , target_version => '11.2.0.2' -- Default NULL , suppress_owner => TRUE -- Default FALSE , suppress_namespace => TRUE -- Default FALSE , preserve_table_owners => TRUE -- Default FALSE , metadata_changes => NULL -- Default NULL ); fh := utl_file.fopen( location => 'DATA_DUMP_DIR2' , filename => 'AW_export_options.xml' , open_mode => 'w' , max_linesize => '32767' ); dbms_lob.read(optionsClob, amount, 1,buffer); utl_file.put_line ( fh, buffer ); utl_file.fclose ( fh); dbms_output.put_line(optionsClob); END; / exec dbms_aw.execute('aw attach . ro'); exec dbms_cube.export_xml_to_file( object_ids => '..AW' , options_dirname => 'DATA_DUMP_DIR2' , options_filename => 'AW_export_options.xml' , output_dirname => 'DATA_DUMP_DIR2' , output_filename => 'AW_Metadata.xml' ); exec dbms_aw.execute('aw detach mscrm_dmart'); -- Importing AW Metadata from an XML file exec dbms_aw.execute('aw delete .'); exec dbms_aw.execute('aw create .'); exec dbms_aw.execute('aw attach . 
rw'); exec dbms_cube.import_xml( dirname=>'DATA_DUMP_DIR2', filename =>'AW_Metadata.xml'); exec dbms_aw.execute('aw detach mscrm_dmart'); ####################################### ##### Proxy Connect Syntax ####################################### alter user pm_own grant connect through vgupta; connect vgupta[PROXYUSER]/password@SID ########################## DiskGroup-TableSpace Size ########################## select tablespace_name , SUBSTR(file_name,1, INSTR(file_name,'/') - 1) DiskGroup , count(1) File_count , ROUND(sum(bytes)/1024/1024/1024) size_GB from dba_data_files where tablespace_name = 'PM_DATA_BIG' group by tablespace_name , SUBSTR(file_name,1, INSTR(file_name,'/') - 1) ORDER BY 1,2 ; select t.name , SUBSTR(f.name,1, INSTR(f.name,'/') - 1) DiskGroup , count(1) File_count , ROUND(sum(f.bytes)/1024/1024/1024) size_GB from v$datafile f, v$tablespace t where f.ts# = t.ts# and t.name = 'PM_DATA_BIG' group by t.name , SUBSTR(f.name,1, INSTR(f.name,'/') - 1) ORDER BY 1,2 ; --************************************* -- DBMS Scheduler Job Queries --************************************* exec dbms_scheduler.stop_job(job_name => 'ADV_OWN.ADV_DEAL_COLLAT_INTEX_VW_JOB' ,force => TRUE); exec dbms_scheduler.run_job (job_name => 'ADV_OWN.ADV_DEAL_COLLAT_INTEX_VW_JOB' ,use_current_session => FALSE); -- Scheduler Jobs SELECT * FROM dba_scheduler_jobs WHERE 1=1 AND owner LIKE '%%' AND JOB_NAME LIKE '%%' ; -- Scheduler Jobs Run Details SELECT * FROM dba_scheduler_job_run_details WHERE 1=1 AND owner LIKE '%%' AND JOB_NAME like '%%' ORDER BY log_date DESC ; -- SELECT * FROM DBA_SCHEDULER_RUNNING_JOBS ; SELECT * FROM DBA_SCHEDULER_JOBS ; SELECT * FROM DBA_SCHEDULER_JOB_RUN_DETAILS ; SELECT * FROM DBA_SCHEDULER_JOB_LOG ; -- SELECT * FROM DBA_SCHEDULER_GLOBAL_ATTRIBUTE ; -- SELECT * FROM DBA_SCHEDULER_GROUPS ; SELECT * FROM DBA_SCHEDULER_GROUP_MEMBERS ; SELECT * FROM DBA_SCHEDULER_JOB_CLASSES ; -- SELECT * FROM DBA_SCHEDULER_SCHEDULES ; SELECT * FROM DBA_SCHEDULER_WINDOW_GROUPS ; 
SELECT * FROM DBA_SCHEDULER_WINGROUP_MEMBERS ; SELECT * FROM DBA_SCHEDULER_WINDOWS ; SELECT * FROM DBA_SCHEDULER_WINDOW_DETAILS ; SELECT * FROM DBA_SCHEDULER_WINDOW_LOG ; -- SELECT * FROM DBA_SCHEDULER_PROGRAMS ; SELECT * FROM DBA_SCHEDULER_PROGRAM_ARGS ; -- SELECT * FROM DBA_SCHEDULER_JOB_ROLES ; SELECT * FROM DBA_SCHEDULER_JOB_ARGS ; SELECT * FROM DBA_SCHEDULER_JOB_DESTS ; SELECT * FROM DBA_SCHEDULER_NOTIFICATIONS ; SELECT * FROM DBA_SCHEDULER_REMOTE_DATABASES ; SELECT * FROM DBA_SCHEDULER_REMOTE_JOBSTATE ; SELECT * FROM DBA_SCHEDULER_RUNNING_CHAINS ; -- SELECT * FROM DBA_SCHEDULER_CHAINS ; SELECT * FROM DBA_SCHEDULER_CHAIN_RULES ; SELECT * FROM DBA_SCHEDULER_CHAIN_STEPS ; SELECT * FROM DBA_SCHEDULER_CREDENTIALS ; SELECT * FROM DBA_SCHEDULER_DB_DESTS ; SELECT * FROM DBA_SCHEDULER_DESTS ; SELECT * FROM DBA_SCHEDULER_EXTERNAL_DESTS ; SELECT * FROM DBA_SCHEDULER_FILE_WATCHERS ; --****************************************************************************** -- DataGuard Rollforward using Incremental Backups -- http://docs.oracle.com/cd/E11882_01/server.112/e41134/rman.htm#SBYDB00759 --****************************************************************************** -------------------------------- -- Datafile Queries -------------------------------- SQL> SELECT TO_CHAR(TRUNC(CHECKPOINT_TIME,'HH24'),'DD-MON-YYYY HH24') "CHECKPOINT_TIME (Hour)", count(1) from v$datafile_header group by TRUNC(CHECKPOINT_TIME,'HH24') order by 1 asc; -- On Standby database SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL; set numf 99999999999999999 SELECT CURRENT_SCN FROM V$DATABASE; -- On a standby that has widespread nologging changes, query the V$DATAFILE view to record the lowest FIRST_NONLOGGED_SCN SELECT MIN(FIRST_NONLOGGED_SCN) FROM V$DATAFILE WHERE FIRST_NONLOGGED_SCN>0 ; -- On a standby that has nologging changes on a subset of datafiles, query the V$DATAFILE view, as follows: SELECT FILE#, FIRST_NONLOGGED_SCN FROM V$DATAFILE WHERE FIRST_NONLOGGED_SCN > 0; -- On Primary 
database RMAN> CONFIGURE DEVICE TYPE DISK PARALLELISM 16; BACKUP INCREMENTAL FROM SCN 685401030526 DATABASE FORMAT '/export/ora_stage/vishal/incremental_backup/ForStandby_%U' tag 'FORSTANDBY2' FILESPERSET 10 MAXSETSIZE 32G; -- or If the standby has nologging changes on a subset of datafiles, -- then create an incremental backup for each datafile listed in the FIRST_NONLOGGED_SCN column (recorded in step 2), as follows: RMAN> BACKUP INCREMENTAL FROM SCN 682842365996 DATAFILE 4 FORMAT '/tmp/ForStandby_%U' TAG 'FORSTANDBY2'; BACKUP INCREMENTAL FROM SCN 682842365996 DATAFILE 5 FORMAT '/tmp/ForStandby_%U' TAG 'FORSTANDBY2'; -- On Standby database -- Transfer backups if not available via NFS mount point RMAN> CATALOG START WITH '/export/ora_stage/vishal/incremental_backup/ForStandby' NOPROMPT; STARTUP NOMOUNT; -- Controlfile restore is only needed if there is archivelog gap. -- RMAN> RESTORE STANDBY CONTROLFILE FROM TAG 'FORSTANDBY'; -- -- catalog all datafile -- Switch datafiles RMAN> ALTER DATABASE MOUNT; RECOVER DATABASE NOREDO; -- Check nologging operations SELECT MIN(FIRST_NONLOGGED_SCN) FROM V$DATAFILE WHERE FIRST_NONLOGGED_SCN>0 ; ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT FROM SESSION; -- On Standby database RMAN> crosscheck backupset TAG 'FORSTANDBY2'; delete backupset TAG 'FORSTANDBY2'; --*************************************************** --************************************* -- DataGuard Corruption Recovery -- Resolving ORA-752 or ORA-600 [3020] During Standby Recovery (Doc ID 1265884.1) --************************************* /* Errors in file /u01/app/oracle/diag/rdbms/pr01zfs/pr01zfs1/trace/pr01zfs1_mrp0_105546.trc (incident=416587): ORA-00600: internal error code, arguments: [3020], [2238], [98776], [805405144], [], [], [], [], [], [], [], [] ORA-10567: Redo is inconsistent with data block (file# 2238, block# 98776, file offset is 809172992 bytes) ORA-10564: tablespace PM_IDX ORA-01110: data file 2238: 
'/zfssa/stby/backup2/pr01pimi/datafile/PR01ZFS/datafile/o1_mf_pm_idx__ec3466ce_.dbf' ORA-10561: block type 'TRANSACTION MANAGED INDEX BLOCK', data object# 5030566 Incident details in: /u01/app/oracle/diag/rdbms/pr01zfs/pr01zfs1/incident/incdir_416587/pr01zfs1_mrp0_105546_i416587.trc */ /* If below command takes too long to cancel MRP, then kill PR00 parallel slave recovery process using alter system command at database level. Avoid kill -9 */ alter database recover managed standby database cancel immediate; -- By default following will start serial recovery alter database recover automatic standby database TEST allow 99 corruption; -- alter database recover automatic standby database until cancel allow 10 corruption ; -->> Wait until you see one of the following message in alert log -->> CORRUPTING BLOCK 1134925 OF FILE 48 AND CONTINUING RECOVERY -->> ORA-01578: ORACLE data block corrupted (file # 48, block # 1134925) -->> Now CTRL-C to cancel recovery -- alter database recover managed standby database disconnect ; alter database recover cancel; -- Restore problematic datafiles alter database recover managed standby database using current logfile disconnect ; alter database recover managed standby database cancel immediate; alter database open read only; -->> Copy the corrupted datafile from primary or another standby. 
RMAN> run { allocate channel c1 device type disk; allocate channel c2 device type disk; allocate channel c3 device type disk; allocate channel c4 device type disk; allocate channel c5 device type disk; sql 'alter database recover managed standby database cancel'; catalog start with '/export/ora_stage/rman_backup/data_D-PR01PIMI_I-2013469077_TS-PM_DATA_BIG_FNO-1514_g2nui9rc.dbf' noprompt; catalog start with '/export/ora_stage/rman_backup/data_D-PR01PIMI_I-2013469077_TS-PM_DATA_BIG_FNO-1666_g3nuib6g.dbf' noprompt; restore datafile 1514,1666 ; change datafilecopy '/export/ora_stage/rman_backup/data_D-PR01PIMI_I-2013469077_TS-PM_DATA_BIG_FNO-1514_g2nui9rc.dbf' uncatalog; change datafilecopy '/export/ora_stage/rman_backup/data_D-PR01PIMI_I-2013469077_TS-PM_DATA_BIG_FNO-1666_g3nuib6g.dbf' uncatalog; sql 'alter database recover managed standby database using current logfile disconnect '; } run{ switch datafile 2201 to datafilecopy '/zfssa/stby/backup2/pr01pimi/datafile/PR01ZFS/datafile/o1_mf_pm_idx__f92b4506_2.dbf'; } --************************************* -- DataGuard Corruption File recovery --************************************* RMAN> restore datafile 1234; sql 'alter database recover managed standby database using current logfile disconnect'; --**************************************** -- Transient Type queries --**************************************** select TO_CHAR(CTIME,'YYYY-MM-DD HH24') ctime , count(1) from sys.obj$ o, sys.type$ t, dba_users u where o.oid$ = t.tvoid and u.user_id = o.owner# and bitand(t.properties,8388608) = 8388608 and (sysdate-o.ctime) > 0.0007 GROUP BY TO_CHAR(CTIME,'YYYY-MM-DD HH24') ORDER BY 1 ; select count(1) , TO_CHAR(min(CTIME),'DD-MON-YY HH24:MI:SS') min_ctime , TO_CHAR(max(ctime),'DD-MON-YY HH24:MI:SS') max_ctime from sys.obj$ o, sys.type$ t, dba_users u where o.oid$ = t.tvoid and u.user_id = o.owner# and bitand(t.properties,8388608) = 8388608 and (sysdate-o.ctime) > 0.0007 ; --**************************************** -- How to 
diagnose following wait events --**************************************** --**************************************** -- 'cursor: pin S wait on X' -- Doc ID 1298015.1 - WAITEVENT: "cursor: pin S wait on X" Reference Note -- Doc ID 1349387.1 - Troubleshooting 'cursor: pin S wait on X' waits. --**************************************** -- SQL causing the issue (P1) SELECT sql_id, sql_text, version_count FROM GV$SQLAREA WHERE HASH_VALUE in (select p1 from gv$session where event = 'cursor: pin S wait on X'); -- SID Holding Mutex (P2) SELECT inst_id , sid , decode(trunc(P2/4294967296), 0,trunc(P2/65536), trunc(P2/4294967296)) SID_HOLDING_MUTEX , status , username , osuser , machine , program , module, action , event FROM gv$session WHERE event = 'cursor: pin S wait on X' ; -- Oracle Code Location (P3) SELECT s.sid , m.inst_id , m.MUTEX_TYPE , m.LOCATION , m.wait_time , m.sleeps FROM sys.x$mutex_sleep m , gv$session s WHERE s.event = 'cursor: pin S wait on X' AND m.inst_id = s.inst_id AND m.mutex_type like 'Cursor Pin%' and m.location_id= decode(trunc(s.P3/4294967296), 0,trunc(s.P3/65536), trunc(s.P3/4294967296) ) ; --**************************************** -- 'library cache lock' -- MOS Note Id 122793.1 - How to Find which Session is Holding a Particular Library Cache Lock -- Troubleshooting Library Cache: Lock, Pin and Load Lock (Doc ID 444560.1) --**************************************** select inst_id,sid,saddr, status, username, osuser,machine , program -- , BLOCKING_INSTANCE, BLOCKING_SESSION, BLOCKING_SESSION_STATUS , FINAL_BLOCKING_INSTANCE , FINAL_BLOCKING_SESSION, FINAL_BLOCKING_SESSION_STATUS , event from gv$session where event= 'library cache lock'; select kgllkhdl Handle,kgllkreq Request, kglnaobj Object from sys.x$kgllk where kgllkses in (select saddr from gv$session where event= 'library cache lock') and kgllkreq > 0; --**************************************** -- 'library cache pin' -- MOS Note Id 780514.1 - How to Find the Blocker of the 'library cache 
pin' in a RAC environment? -- WAITEVENT: "library cache pin" Reference Note [ID 34579.1] -- Troubleshooting Library Cache: Lock, Pin and Load Lock (Doc ID 444560.1) --**************************************** --**************************************** -- Shrink Datafiles --**************************************** select 'alter index ' || owner || '.' || segment_name || ' rebuild ' || nvl2(partition_name,' partition ' || partition_name ,' ') || ' online parallel 10;' , block_id, blocks from (select owner, segment_name, partition_name, max(block_id) block_id, max(blocks) blocks from dba_extents where file_id = 1909 group by owner, segment_name, partition_name ) order by block_id desc ; set lines 200 set pages 400 UNDEFINE tablespace_name DEFINE tablespace_name select /*+ CHOOSE */ 'alter database datafile ''' || file_name || ''' resize ' || TO_CHAR(CEIL(HWM)+50) || 'M; ---- Freeable ' || freeable || 'MB' from ( Select /*+ CHOOSE */ t.tablespace_name , d.status "Status" , GREATEST(d.maxbytes,d.bytes)/1024/1024 maxsize , d.bytes/1024/1024 alloc , NVL((hwm.bytes)/1024/1024,0) HWM , ROUND((d.bytes - NVL(f.bytes,0))/1024/1024,2) used , ROUND(decode(f.bytes, NULL,0, f.bytes)/1024/1024,2) free , ROUND( (d.bytes - GREATEST( NVL(hwm.bytes,0), (d.bytes - NVL(f.bytes,0)) ) ) /1024/1024,2) freeable , d.INCREMENT_BY/1024/1024 Increment_by , t.pct_increase pct_increase , d.file_id , SUBSTR(d.file_name,1,80) file_name FROM DBA_DATA_FILES d , DBA_TABLESPACES t , (SELECT /*+ CHOOSE */ tablespace_name , file_id , sum(bytes) bytes FROM DBA_FREE_SPACE f WHERE tablespace_name LIKE '&&tablespace_name' GROUP BY tablespace_name, file_id) f , (Select /*+ CHOOSE */ file_id, NVL( max((block_id + blocks - 1 ) * p.value),0) bytes from dba_extents , v$system_parameter p WHERE tablespace_name LIKE '&&tablespace_name' AND p.name = 'db_block_size' GROUP BY file_id ) hwm WHERE t.tablespace_name = d.tablespace_name AND f.tablespace_name(+) = d.tablespace_name AND f.file_id(+) = d.file_id AND 
hwm.file_id(+) = d.file_id AND d.tablespace_name LIKE '&&tablespace_name' UNION ALL SELECT /*+ CHOOSE */ tf.tablespace_name , tf.status , GREATEST(tf.maxbytes,tf.bytes)/1024/1024 maxsize , (tf.bytes/1024/1024) alloc , (tf.bytes/1024/1024) HWM , ROUND(ts.bytes_used/1024/1024) used , ROUND(ts.bytes_free/1024/1024) free , ROUND((tf.bytes - tf.bytes)/1024/1024,2) freeable , tf.INCREMENT_BY/1024/1024 Increment_by , 0 pct_increase , tf.file_id , SUBSTR(tf.file_name,1,80) file_name FROM dba_temp_files tf, V$TEMP_SPACE_HEADER ts WHERE ts.file_id = tf.file_id and ts.tablespace_name LIKE '&&tablespace_name' ) c WHERE freeable > 100 AND file_name LIKE '+%' ORDER BY freeable desc, tablespace_name,c.file_id asc; --**************************************** -- SCAN listener --*************************************** for i in 1 2 3 ; do server=`crsctl stat res ora.LISTENER_SCAN$i.lsnr |grep STATE|awk '{print $NF}'`; ssh $server "grep '06-MAR-2013 07' /u01/app/11.2.0.2/grid/log/diag/tnslsnr/$server/listener_scan$i/trace/listener_scan$i.log | grep 'pr01pimi_adv_grid_con' " done To Trace SRVCTL issues. Set the debug flag on. export SRVM_TRACE=TRUE --**************************************** -- SMON and CJQ Memory Leak, increase Linux level process memory --*************************************** vm.max_map_count = 200000 # (Default is 65536) cat /proc/sys/vm/max_map_count echo 200000 > /proc/sys/vm/max_map_count Process level max PGA memory ******************************* = _realfree_heap_pagesize_hint * vm.max_map_count = 64k (default value) * 64k (default value) = 4GB Increase either Bug 11852492 : ORA-4030 OCCURS WHEN PGA EXCEEDS 4GB. 
############################################################### # As per Automatic Memory Management (AMM) on 11g [ID 443746.1] # To fix ORA-4030 out of process memory errors in SMON process, # which were resulting in database instance crash # vm.max_map_count = 200000 --**************************************** --**************************************** --**** CJQ Workaround --**************************************** select * From dba_scheduler_running_jobs; --ORA-01405: Fetched Column Value Is Null For An Advanced Queue [ID 1162862.1] select owner, name, queue_type, enqueue_enabled, dequeue_enabled from dba_queues where owner='SYS' and queue_table='SYS$SERVICE_METRICS_TAB' and queue_type='NORMAL_QUEUE'; --Note1: Replace '.' appropriately with your affected queue table: exec dbms_aqadm.stop_queue(queue_name => 'SYS.SYS$SERVICE_METRICS'); delete from sys.aq$_SYS$SERVICE_METRICS_TAB_L where msgid ='00000000000000000000000000000000'; commit; DECLARE po dbms_aqadm.aq$_purge_options_t; BEGIN po.block := TRUE; DBMS_AQADM.PURGE_QUEUE_TABLE( queue_table => 'SYS.SYS$SERVICE_METRICS_TAB', purge_condition => 'qtview.queue = ''SYS.AQ$SYS$SERVICE_METRICS_TAB'' and qtview.msg_state = ''PROCESSED''', purge_options => po); commit; END; / select count(*) from SYS.aq$_SYS$SERVICE_METRICS_TAB_L where msgid ='00000000000000000000000000000000'; exec dbms_aqadm.start_queue(queue_name => 'SYS.SYS$SERVICE_METRICS'); select owner, name, queue_type, enqueue_enabled, dequeue_enabled from dba_queues where owner='SYS' and queue_table='SYS$SERVICE_METRICS_TAB' and queue_type='NORMAL_QUEUE'; select * From dba_scheduler_running_jobs; ########################### # Database Events ########################### -------------------------------------- --dNFS (Direct NFS) related events -------------------------------------- event "19392 trace name context forever, level 8" # kgnfs (dNFs) event "19394 trace name context forever, level 8" # skgnfs (Normal NFS) event "19396 trace name context forever, 
level 2" # kgodm (ODM Related) event "19398 trace name context forever, level 128" #mount errors alter system set event = '19392 trace name context forever, level 8','19394 trace name context forever, level 8','19396 trace name context forever, level 2','19398 trace name context forever, level 128' sid='*' scope=spfile ; Network NSLOOKUP ################## #Reverse Lookup ############## for i in {1..254}; do for j in {1..254}; do host="144.77.$i.$j" echo $host - ` nslookup $host |grep name |cut -d= -f2 ` - $(if [ "`ping -c1 -i1 -w1 $host >/dev/null 2>&1 ; echo $?`" -eq 0 ] ; then echo Pingable ; fi;) done done for i in {1..254}; do for j in {1..254}; do host="192.168.$i.$j" echo $host - ` nslookup $host |grep name |cut -d= -f2 ` done done for j in {1..254} do host="10.201.119.$j" echo $host - ` nslookup $host |grep name |cut -d= -f2 ` - $(if [ "`ping -c1 -w1 $host >/dev/null 2>&1 ; echo $?`" -eq 0 ] ; then echo Pingable ; fi;) done % # Filter for specific named hosts for j in {1..254}; do for i in 120 129 do host="192.168.$i.$j"; if [[ "`nslookup $host |grep name |cut -d= -f2`" == *dm01* ]]; then echo $host - ` nslookup $host |grep name |cut -d= -f2 ` - $(if [ "`ping -c1 -w1 $host >/dev/null 2>&1 ; echo $?`" -eq 0 ] ; then echo Pingable ; fi;) fi done done HangAnalyze ################# Oracle Performance Diagnostic Guide (OPDG) (Doc ID 390374.1) How to Collect Diagnostics for Database Hanging Issues (Doc ID 452358.1) sqlplus -prelim '/ as sysdba' oradebug setorapname diag oradebug unlimit oradebug -g all hanganalyze 3 oradebug -g all dump systemstate 258 -- <> oradebug -g all hanganalyze 3 oradebug -g all dump systemstate 258 -- Upload the *diag* process traces from all the instances ######################################## Generate Service creation commands ######################################## ######################################## # 11g ######################################## database=ts01mtgz #tmpfile=/export/ora_stage/vishal/${database}_services.txt 
tmpfile=/tmp/${database}_services.txt srvctl config service -d $database > $tmpfile for i in `cat $tmpfile|grep 'Service name'|cut -d: -f2`; do servicename="`grep -A18 "Service name: $i" $tmpfile|grep 'Service name'|cut -d: -f2`"; preferred_instances="`grep -A18 "Service name: $i" $tmpfile|grep 'Preferred instances'|cut -d: -f2`"; available_instances="`grep -A18 "Service name: $i" $tmpfile|grep 'Available instances'|cut -d: -f2`"; service_role="`grep -A18 "Service name: $i" $tmpfile|grep 'Service role'|cut -d: -f2`"; aq_notification="`grep -A18 "Service name: $i" $tmpfile|grep 'AQ HA notifications'|cut -d: -f2`"; failover_type="`grep -A18 "Service name: $i" $tmpfile|grep 'Failover type'|cut -d: -f2`"; failover_method="`grep -A18 "Service name: $i" $tmpfile|grep 'Failover method'|cut -d: -f2`"; failover_delay="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF failover delay'|cut -d: -f2`"; failover_retries="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF failover retries'|cut -d: -f2`"; taf_policy="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF policy specification'|cut -d: -f2`"; clb_goal="`grep -A18 "Service name: $i" $tmpfile|grep 'Connection Load Balancing Goal'|cut -d: -f2`"; rlb_goal="`grep -A18 "Service name: $i" $tmpfile|grep 'Runtime Load Balancing Goal'|cut -d: -f2`"; dtp_transaction="`grep -A18 "Service name: $i" $tmpfile|grep 'DTP transaction'|cut -d: -f2`"; ### Display commands for adding services echo "srvctl add service -d $database -s $servicename -r $preferred_instances -a $available_instances -P $taf_policy -l $service_role -e $failover_type -m $failover_method -w $failover_delay -z $failover_retries -j $clb_goal -B $rlb_goal -x $dtp_transaction -q $aq_notification" ### Display commands for relocating services #echo "srvctl modify service -d $database -s $servicename -n -i $preferred_instances -a $available_instances " done ### Display commands for removing services for i in `cat $tmpfile|grep 'Service name'|cut -d: -f2`; do echo srvctl remove 
service -d $database -s $i done Usage: srvctl add service -d -s {-r "" [-a ""] [-P {BASIC | NONE | PRECONNECT}] | -g [-c {UNIFORM | SINGLETON}] } [-k ] [-l [PRIMARY][,PHYSICAL_STANDBY][,LOGICAL_STANDBY][,SNAPSHOT_STANDBY]] [-y {AUTOMATIC | MANUAL}] [-q {TRUE|FALSE}] [-x {TRUE|FALSE}] [-j {SHORT|LONG}] [-B {NONE|SERVICE_TIME|THROUGHPUT}] [-e {NONE|SESSION|SELECT}] [-m {NONE|BASIC}] [-z ] [-w ] [-t ] [-f] -d Unique name for the database -s Service name -r "" Comma separated list of preferred instances -a "" Comma separated list of available instances -g Server pool name -c {UNIFORM | SINGLETON} Service runs on every active server in the server pool hosting this service (UNIFORM) or just one server (SINGLETON) -k network number (default number is 1) -P {NONE | BASIC | PRECONNECT} TAF policy specification -l Role of the service (primary, physical_standby, logical_standby, snapshot_standby) -y Management policy for the service (AUTOMATIC or MANUAL) -e Failover type (NONE, SESSION, or SELECT) -m Failover method (NONE or BASIC) -w Failover delay -z Failover retries -t Edition (or "" for empty edition value) -j Connection Load Balancing Goal (SHORT or LONG). Default is LONG. 
-B Runtime Load Balancing Goal (SERVICE_TIME, THROUGHPUT, or NONE) -x Distributed Transaction Processing (TRUE or FALSE) -q AQ HA notifications (TRUE or FALSE) Usage: srvctl add service -d -s -u {-r "" | -a ""} [-f] -d Unique name for the database -s Service name -u Add a new instance to service configuration -r Name of new preferred instance -a Name of new available instance -f Force the add operation even though a listener is not configured for a network -h Print usage Service name: bt01pimi_tools_con Service is enabled Server pool: bt01pimi_bt01pimi_tools_con Cardinality: 3 Disconnect: false Service role: PRIMARY Management policy: AUTOMATIC DTP transaction: false AQ HA notifications: false Failover type: SESSION Failover method: BASIC TAF failover retries: 0 TAF failover delay: 0 Connection Load Balancing Goal: LONG Runtime Load Balancing Goal: NONE TAF policy specification: BASIC Edition: Preferred instances: bt01pimi1,bt01pimi2,bt01pimi3 Available instances: ############################################# # Generate script from 11g output for 12c ############################################# database=al01pimn #tmpfile=/export/ora_stage/vishal/${database}_services.txt tmpfile=/tmp/${database}_services.txt srvctl config service -d $database > $tmpfile for i in `cat $tmpfile|grep 'Service name'|cut -d: -f2`; do servicename="`grep -A18 "Service name: $i" $tmpfile|grep 'Service name'|cut -d: -f2`"; preferred_instances="`grep -A18 "Service name: $i" $tmpfile|grep 'Preferred instances'|cut -d: -f2`"; available_instances="`grep -A18 "Service name: $i" $tmpfile|grep 'Available instances'|cut -d: -f2`"; service_role="`grep -A18 "Service name: $i" $tmpfile|grep 'Service role'|cut -d: -f2`"; aq_notification="`grep -A18 "Service name: $i" $tmpfile|grep 'AQ HA notifications'|cut -d: -f2`"; failover_type="`grep -A18 "Service name: $i" $tmpfile|grep 'Failover type'|cut -d: -f2`"; failover_method="`grep -A18 "Service name: $i" $tmpfile|grep 'Failover method'|cut -d: -f2`"; 
failover_delay="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF failover delay'|cut -d: -f2`"; failover_retries="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF failover retries'|cut -d: -f2`"; taf_policy="`grep -A18 "Service name: $i" $tmpfile|grep 'TAF policy specification'|cut -d: -f2`"; clb_goal="`grep -A18 "Service name: $i" $tmpfile|grep 'Connection Load Balancing Goal'|cut -d: -f2`"; rlb_goal="`grep -A18 "Service name: $i" $tmpfile|grep 'Runtime Load Balancing Goal'|cut -d: -f2`"; dtp_transaction="`grep -A18 "Service name: $i" $tmpfile|grep 'DTP transaction'|cut -d: -f2`"; ### Display commands for adding services echo "srvctl add service -d $database -s $servicename -r $preferred_instances -a $available_instances -P $taf_policy -l $service_role -e $failover_type -m $failover_method -w $failover_delay -z $failover_retries -j $clb_goal -B $rlb_goal -x $dtp_transaction -q $aq_notification" ### Display commands for relocating services #echo "srvctl modify service -d $database -s $servicename -n -i $preferred_instances -a $available_instances " done ### Display commands for removing services for i in `cat $tmpfile|grep 'Service name'|cut -d: -f2`; do echo srvctl remove service -d $database -s $i done Usage: srvctl add service -db -service {-preferred "" [-available ""] [-tafpolicy {BASIC | NONE | PRECONNECT}] | -serverpool [-cardinality {UNIFORM | SINGLETON}] } [-netnum ] [-role "[PRIMARY][,PHYSICAL_STANDBY][,LOGICAL_STANDBY][,SNAPSHOT_STANDBY]"] [-policy {AUTOMATIC | MANUAL}] [-notification {TRUE|FALSE}] [-dtp {TRUE|FALSE}] [-clbgoal {SHORT|LONG}] [-rlbgoal {NONE|SERVICE_TIME|THROUGHPUT}] [-failovertype {NONE|SESSION|SELECT|TRANSACTION}] [-failovermethod {NONE|BASIC}] [-failoverretry ] [-failoverdelay ] [-edition ] [-pdb ] [-global {TRUE|FALSE}] [-maxlag ] [-sql_translation_profile ] [-commit_outcome {TRUE|FALSE}] [-retention ] [-replay_init_time ] [-session_state {STATIC|DYNAMIC}] [-pqservice ] [-pqpool ] [-gsmflags ] [-force] [-eval] [-verbose] -db Unique 
name for the database -service Service name -preferred "" Comma separated list of preferred instances -available "" Comma separated list of available instances -serverpool Server pool name -cardinality (UNIFORM | SINGLETON) Service runs on every active server in the server pool hosting this service (UNIFORM) or just one server (SINGLETON) -netnum Network number (default number is 1) -tafpolicy (NONE | BASIC | PRECONNECT) TAF policy specification -role Role of the service (primary, physical_standby, logical_standby, snapshot_standby) -policy Management policy for the service (AUTOMATIC or MANUAL) -failovertype (NONE | SESSION | SELECT | TRANSACTION) Failover type -failovermethod (NONE | BASIC) Failover method -failoverdelay Failover delay (in seconds) -failoverretry Number of attempts to retry connection -edition Edition (or "" for empty edition value) -pdb Pluggable database name -maxlag Maximum replication lag time in seconds (Non-negative integer, default value is 'ANY') -clbgoal (SHORT | LONG) Connection Load Balancing Goal. Default is LONG. 
-rlbgoal (SERVICE_TIME | THROUGHPUT | NONE) Runtime Load Balancing Goal -dtp (TRUE | FALSE) Distributed Transaction Processing -notification (TRUE | FALSE) Enable Fast Application Notification (FAN) for OCI connections -global Global attribute (TRUE or FALSE) -preferred Name of new preferred instance -available Name of new available instance -sql_translation_profile Specify a database object for SQL translation profile -commit_outcome (TRUE | FALSE) Commit outcome -retention Specifies the number of seconds the commit outcome is retained -replay_init_time Seconds after which replay will not be initiated -session_state Session state consistency (STATIC or DYNAMIC) -pqservice Parallel query service name -pqpool Parallel query server pool name -gsmflags Set locality and region failover values -eval Evaluates the effects of event without making any changes to the system Usage: srvctl add service -db -service -update {-preferred "" | -available ""} [-force] [-verbose] -db Unique name for the database -service Service name -update Add a new instance to service configuration -preferred Name of new preferred instance -available Name of new available instance -force Force the add operation even though a listener is not configured for a network -verbose Verbose output -help Print usage Service name: bt01pimi_tools_con Service is enabled Server pool: bt01pimi_bt01pimi_tools_con Cardinality: 3 Disconnect: false Service role: PRIMARY Management policy: AUTOMATIC DTP transaction: false AQ HA notifications: false Failover type: SESSION Failover method: BASIC TAF failover retries: 0 TAF failover delay: 0 Connection Load Balancing Goal: LONG Runtime Load Balancing Goal: NONE TAF policy specification: BASIC Edition: Preferred instances: bt01pimi1,bt01pimi2,bt01pimi3 Available instances: ################################################################################# # Oracle Binary Options & Direct FNS (dNFS) setting # MOS Note ID: 948061.1 - How to Check and Enable/Disable Oracle 
Binary Options ################################################################################# #--- List the object files for different options #-- Object files for turning Options ON and OFF grep _ON $ORACLE_HOME/rdbms/lib/env_rdbms.mk |grep OBJ_EXT grep _OFF $ORACLE_HOME/rdbms/lib/env_rdbms.mk |grep OBJ_EXT FORMAT="%35s \t %3s \n" for i in `strings $ORACLE_HOME/lib/libodm??.so | grep odm_init | awk '{print $NF}' | cut -d" " -f1` `ar -t $ORACLE_HOME/rdbms/lib/libknlopt.a |awk '{print $NF}' ` do case $i in kfon.o) printf "$FORMAT" "Oracle ASM" "ON";; kfoff.o) printf "$FORMAT" "Oracle ASM" "OFF";; kciwcx.o) printf "$FORMAT" "Oracle Context" "ON";; kcincx.o) printf "$FORMAT" "Oracle Context" "OFF";; dmwdm.o) printf "$FORMAT" "Oracle Data Mining" "ON";; dmndm.o) printf "$FORMAT" "Oracle Data Mining" "OFF";; ktd.o) printf "$FORMAT" "Oracle DDB" "ON";; ksnktd.o) printf "$FORMAT" "Oracle DDB" "OFF";; kzvidv.o) printf "$FORMAT" "Oracle Database Vault" "ON";; kzvndv.o) printf "$FORMAT" "Oracle Database Vault" "OFF";; kgodm_init) printf "$FORMAT" "Oracle Direct NFS" "ON";; odm_init) printf "$FORMAT" "Oracle Direct NFS" "OFF";; jox.o) printf "$FORMAT" "Oracle JOX" "ON";; joxoff.o) printf "$FORMAT" "Oracle JOX" "OFF";; kprwts.o) printf "$FORMAT" "Oracle KPT" "ON";; kprnts.o) printf "$FORMAT" "Oracle KPT" "OFF";; kzlilbac.o) printf "$FORMAT" "Oracle Label Security" "ON";; kzlnlbac.o) printf "$FORMAT" "Oracle Label Security" "OFF";; xsyeolap.o) printf "$FORMAT" "Oracle OLAP" "ON";; xsnoolap.o) printf "$FORMAT" "Oracle OLAP" "OFF";; kkpoban.o) printf "$FORMAT" "Oracle Partitioning" "ON";; ksnkkpo.o) printf "$FORMAT" "Oracle Partitioning" "OFF";; kkxwtp.o) printf "$FORMAT" "Oracle PL/SQL" "ON";; kkxntp.o) printf "$FORMAT" "Oracle PL/SQL" "OFF";; kcsm.o) printf "$FORMAT" "Oracle Real Application Cluster" "ON";; ksnkcs.o) printf "$FORMAT" "Oracle Real Application Cluster" "OFF";; kecwr.o) printf "$FORMAT" "Oracle Real Application Testing" "ON";; kecnr.o) printf "$FORMAT" "Oracle 
Real Application Testing" "OFF";; kxmwsd.o) printf "$FORMAT" "Oracle SDO" "ON";; kxmnsd.o) printf "$FORMAT" "Oracle SDO" "OFF";; kdzof.o) printf "$FORMAT" "Oracle ZDK" "OFF";; sllfls.o) printf "$FORMAT" "Oracle LIL" "OFF";; xaonsl.o) printf "$FORMAT" "Oracle XAS" "OFF";; xaondy.o) printf "$FORMAT" "Oracle XAD" "OFF";; *) printf "$FORMAT" "$i" "" ;; esac done |sort /*------------------------------------------------------------------------------------ Convert Physical standby to Snapshot standby Ref - http://docs.oracle.com/cd/B28359_01/server.111/b28294/manage_ps.htm#BACIEJJI ------------------------------------------------------------------------------------*/ sqlplus / as sysdba < 'Idle' and s.program like '%(PR%' order by s.program ; ----------------------------- -- Parallel Recovery process ----------------------------- SELECT p.inst_id , p.pname , DECODE(sw.p1text, 'log#',(select thread# from v$archived_log l2 where l2.first_time = (select max(first_Time) from v$archived_log l where l.sequence# = sw.p1 ) and l2.sequence# = sw.p1) ,'') thread# , DECODE(sw.p1text, 'log#',sw.p1 ,'') sequence# , DECODE(sw.p1text, 'log#',(select max(first_Time) from v$archived_log l where l.sequence# = sw.p1 ),'') archlog_first_Time , DECODE(sw.p1text, 'log#',sw.p2 ,'') block# , DECODE(sw.p1text, 'log#',sw.p3 ,'') blocks , sw.event , sw.p1text , sw.p1 FROM gv$session_wait sw JOIN gv$session s ON s.inst_id = sw.inst_id and sw.sid = s.sid and s.program like '%' JOIN gv$process p ON s.inst_id = p.inst_id and s.paddr = p.addr where sw.wait_class <> 'Idle' and s.program like '%(PR%' order by s.program ; ----------------------------- -- Recovery Session Events ----------------------------- select s.program, se.* From gv$session_event se JOIN gv$session s ON s.inst_id = se.inst_id and se.sid = s.sid JOIN gv$process p ON p.inst_id = s.inst_id and s.paddr = p.addr where s.program like '%(PR%' and se.wait_class not in ('Idle') order by se.time_waited desc ; SELECT * FROM gv$managed_standby; 
SELECT * FROM gv$recovery_progress; select * From v$recovery_status; select * From V$RECOVERY_FILE_STATUS; select * From V$instance_recovery; select * From V$instance_recovery; SELECT thread#, sequence#, status, applied,archived, deleted, first_Time, next_time FROM v$archived_log where sequence# = 156987 order by first_time desc; ----------------------------- -- Get Latch details -- -- Ref -- http://www.pythian.com/blog/tuning-latch-contention-cache-buffers-chain-latches/ -- http://blog.tanelpoder.com/2010/02/15/new-versions-of-latchprof-and-latchprofx-for-latch-contention-troubleshooting-and-tuning/ ----------------------------- -- Get latch Summary set lines 1000 set verify off @latchprof name % % 1000 @latchprof name,sid % % 1000 @latchprof name,sid % "cache buffers chains" 1000 @latchname 186 @latchname "%cache%" -- Get latch details select /*+ leading(s) USE_NL(s l) */ l.* , s.sid , s.inst_id , s.username , s.osuser , s.program From gv$session s JOIN gV$LATCH_CHILDREN l ON l.inst_id = s.inst_id and l.addr = RAWTOHEX(s.P1RAW) WHERE s.event like '%latch%' ; --------------------------------------- -- Dynamic Object Block Remastering --------------------------------------- SELECT o.owner , o.object_name , o.subobject_name , o.object_type , f.tablespace_name , g.file_id , g.gc_mastering_policy , g.current_master , g.previous_master , g.remaster_cnt FROM v$gcspfmaster_info g INNER JOIN dba_objects o on o.data_object_id = g.data_object_id LEFT OUTER JOIN dba_data_files f ON f.file_id = g.file_id WHERE 1=1 AND o.owner LIKE '%' AND o.object_name LIKE '%' ; ------------------------------------------ -- Convert comma separated list to rows ------------------------------------------- SELECT regexp_substr(:csv_list, '[^,]+', 1, LEVEL) token FROM dual CONNECT BY instr(:csv_list, ',', 1, LEVEL - 1) > 0 ; ------------------------------------------ -- Convert rows to comma separated list ------------------------------------------- select listagg(name, ',') within group (order 
by id) as list from student; select RTRIM(XMLAGG(XMLELEMENT(E,TABLE_NAME,',').EXTRACT('//text()') ORDER BY TABLE_NAME).GETCLOBVAL(),',') AS TABLE_LIST from dba_tables; -------------------------------------------------------------- -- Delete Cluster Health Service , Berkeley database file -- ora.crf resource can be stopped without impacting anything -------------------------------------------------------------- cd /u01/app/11.2.0.4/grid/crf/db/`hostname -s`/ crsctl stop res ora.crf -init sudo -u root rm crfclust.bdb crsctl start res ora.crf -init crsctl status res ora.crf -init -------------------------------------------------------------- -- Get Partition name based on high_value -- Convert LONG to VARCHAR2 -------------------------------------------------------------- select * from xmltable( '/ROWSET/ROW' passing dbms_xmlgen.getXMLType(' select table_owner , table_name , partition_name , subpartition_count , high_value /* <---- Long Column type*/ from dba_tab_partitions WHERE table_owner = ''LEVERAGE_OWN'' AND table_name = ''TEMPLATE_RESULT_DETAILS_CLOSE'' ' ) columns table_owner varchar2(30) , table_name varchar2(30) , partition_name varchar2(30) , subpartition_count NUMBER , high_value varchar2(4000) ) WHERE 1=1 and high_value like '%' || TO_CHAR(trunc(sysdate+1),'YYYY-MM-DD HH24:MI:SS') || '%' ; -------------------------------------------------------------- -- Automatic Diagnostic Repository (ADR) -------------------------------------------------------------- $ORACLE_HOME/bin/adrci < move LOB ( .XMLDATA ) STORE AS ( TABLESPACE ); ------------------------------------------------------------------- --- Move LOB ------------------------------------------------------------------- alter table move LOB ( .XMLDATA ) STORE AS ( TABLESPACE ); ------------------------------------------------------------------- --- Move XMLType ------------------------------------------------------------------- alter table move LOB ( ) STORE AS ( TABLESPACE ); 
------------------------------------------------------------------- --- SQL Binds ------------------------------------------------------------------- SELECT s.inst_id , s.sql_id , b.child_number , b.child_address , b.name bind_name , B.DATATYPE_STRING bind_datatype , b.value_string bind_value , B.VALUE_ANYDATA , B.WAS_CAPTURED , b.last_captured FROM gv$sql s JOIN gv$sql_bind_capture b ON b.inst_id = s.inst_id and b.sql_id = s.sql_id WHERE s.sql_id='c7y4045sx7ax6' ; ------------------------------------------------------------------- --- To speedup queries against DBA_FREE_SPACE view. ------------------------------------------------------------------- @stats SYS X$KTFBUE @stats SYS RECYCLEBIN$ exec dbms_stats.gather_table_stats('SYS','X$KTFBUE',degree=>16) exec dbms_stats.gather_table_stats('SYS','RECYCLEBIN$',degree=>16) SELECT NVL(SUM(BYTES),0)/(1024*1024) FREE_BYTES FROM DBA_FREE_SPACE WHERE TABLESPACE_NAME = 'SYSAUX'; ------------------------------------------------------------------- --- Relocate SCAN listener in OEM from one agent to another ------------------------------------------------------------------- # # On OEM application server , where weblogic runs. # Example location of emcli # $OMS_HOME/oms/bin/emcli # /u01/oracle/app/product/Middleware/oms12c/oms/bin/emcli # emcli help relocate_targets emcli login  -username= -password= emcli relocate_targets \ -src_agent=source.hostname.com:1830 \ -dest_agent=target.hostname.com:1830 \ -target_name=LISTENER_SCAN1_dmz \ -target_type=oracle_listener \ -copy_from_src \ -force=yes ------------------------------------------------------------------- --- ARP cache debugging ------------------------------------------------------------------- # Check that MAC address of SCAN VIP on client machines matches the MAC address on DB server of underlying bond interface for a SCAN VIP address. 
# Display current arp cache on a host /sbin/arp -h /sbin/arp -v |sort -k1 # Display dns names of hosts /sbin/arp -vn |sort -k1 # Display numeric IPs arping $ /sbin/arp -h Usage: arp [-vn] [] [-i ] [-a] [] <-Display ARP cache arp [-v] [-i ] -d [pub][nopub] <-Delete ARP entry arp [-vnD] [] [-i ] -f [] <-Add entry from file arp [-v] [] [-i ] -s [temp][nopub] <-Add entry arp [-v] [] [-i ] -s [netmask ] pub <-''- arp [-v] [] [-i ] -Ds [netmask ] pub <-''- -a display (all) hosts in alternative (BSD) style -e display (all) hosts in default (Linux) style -s, --set set a new ARP entry -d, --delete delete a specified entry -v, --verbose be verbose -n, --numeric don't resolve names -i, --device specify network interface (e.g. eth0) -D, --use-device read from given device -A, -p, --protocol specify protocol family -f, --file read new entries from file or from /etc/ethers =Use '-H ' to specify hardware address type. Default: ether List of possible hardware types (which support ARP): strip (Metricom Starmode IP) ash (Ash) ether (Ethernet) tr (16/4 Mbps Token Ring) tr (16/4 Mbps Token Ring (New)) ax25 (AMPR AX.25) netrom (AMPR NET/ROM) rose (AMPR ROSE) arcnet (ARCnet) dlci (Frame Relay DLCI) fddi (Fiber Distributed Data Interface) hippi (HIPPI) irda (IrLAP) x25 (generic X.25) infiniband (InfiniBand) $ /sbin/arping -h Usage: arping [-fqbDUAV] [-c count] [-w timeout] [-I device] [-s source] destination -f : quit on first reply -q : be quiet -b : keep broadcasting, don't go unicast -D : duplicate address detection mode -U : Unsolicited ARP mode, update your neighbours -A : ARP answer mode, update your neighbours -V : print version and exit -c count : how many packets to send -w timeout : how long to wait for a reply -I device : which ethernet device to use (eth0) -s source : source ip address destination : ask for what ip address ------------------------------------------------------------------- --- Excel Size Analysis 
------------------------------------------------------------------- Press Alt-F11 and copy the sub below into any VB module. Position the cursor anywhere in the Sub and press F5. The results appear in the Immediate Window, which is usually to be found at the bottom of the VBA window. Type Control-G if it’s not visible. Sub ScanSheets() 'report sheet used range size, number of Query Tables on it and number of pivot tables Dim ws As Worksheet Debug.Print Left("Name" & String$(20, " "), 20) & Left("Rows" & String$(8, " "), 8) & Left("Cols" & String$(6, " "), 6) & "QueryTables" & vbTab & "PivotTables" Debug.Print String$(60, "-") For Each ws In Worksheets Debug.Print Left(ws.Name & String$(20, " "), 20) & _ Left(CStr(ws.UsedRange.Rows.Count) & String$(8, " "), 8) & _ Left(CStr(ws.UsedRange.Columns.Count) & String$(6, " "), 6) & _ ws.QueryTables.Count & vbTab & vbTab & vbTab & vbTab & ws.PivotTables.Count Next 'ws End Sub Here’s the results for one of the sheets Paolo sent: Name Rows Cols QueryTables PivotTables ------------------------------------------------------------ 2012red 246 53 0 0 bill2012 164 21 0 0 2012 217 49 0 0 CAL act 513 21 0 0 Cal exp. 481 21 0 0 cheat 51 27 0 0 database 147 142 0 0 BM bonds 242 18 0 0 eccomm 230 93 0 0 this week 109 42 0 0 create table 4886 16384 0 0 history2 771 23 0 0 history1 64521 87 0 0 overview 579 61 0 4 ovhist 106 18 0 0 disc 740 37 0 0 Italy 322 28 0 0 Italy 2 214 148 0 0 EFSF 163 16314 0 0 GreecePSI 1048048 44 0 0 auction 282 42 0 0 Port 145 17 0 0 study 56 31 0 0 last week 95 15 0 0 spain2 39 13 0 0 Spain 120 15 0 0 I first looked at the 4 Pivot Tables on sheet ‘overview’. I saw that all these were taking 65k rows from sheet ‘history1’ and expect that this is the problem the user was having. 
I then went in and deleted extra rows and columns from ‘create table’, ‘history1’, ‘EFSF’ and ‘GreecePSI’ This reduced the file footprint from 33MB to 3MB but also had the effect of cutting down the range used by the 4 Pivot Tables on sheet ‘overview.’ I would expect the reduced sheet (attached) to run without problem. There are no MS Queries or other code to populate ‘history1’ so one thing to do is find out how it is being populated and provide a macro that automatically resizes the range of the PivotTables when the user changes ‘history1’ Tell me if this is too much information ? ------------------------------------------------------------- -- Hourly DB Statistics (Derived from ASH) ------------------------------------------------------------- select TO_CHAR(TRUNC(ash.sample_time,'MI'),'YYYY-MM-DD HH24:MI') sample_time , SUM(ash.DELTA_READ_IO_REQUESTS) read_io_requests , sum(ash.delta_write_io_requests) "Phy" , ROUND(sum(ash.delta_read_io_bytes)/power(1024,2)) "PhyRead (MB)" , ROUND(sum(ash.delta_write_io_bytes)/power(1024,2)) "PhyWrite (MB)" , ROUND(sum(ash.delta_interconnect_io_bytes)/power(1024,2)) "Interconnect (MB)" , sum(ash.tm_delta_time) tm_time , sum(ash.tm_delta_cpu_time) tm_cpu_time , sum(ash.tm_delta_db_time) tm_db_time from gv$active_session_history ash where ash.sample_time > systimestamp - 1/24 GROUP BY TRUNC(ash.sample_time,'MI') ORDER BY TRUNC(ash.sample_time,'MI') ; ------------------------------------------------------------- -- DB Statistics by User (Derived from SQL Statistics) ------------------------------------------------------------- SELECT ss.parsing_schema_name , TO_CHAR(MIN(s.end_interval_time),'DD-MON-YYYY HH24:MI') min_snapshot_time , TO_CHAR(MAX(s.end_interval_time),'DD-MON-YYYY HH24:MI') max_snapshot_time , sum(ss.executions_delta) executions , sum(ss.parse_calls_delta) parse_calls , sum(ss.disk_reads_delta) disk_reads , sum(ss.buffer_gets_delta) buffer_gets , sum(ss.rows_processed_delta) rows_processed , sum(ss.cpu_time_delta) 
cpu_time , sum(ss.elapsed_time_delta) elapsed_time , sum(ss.iowait_delta) iowait_time , sum(ss.clwait_delta) cluster_wait_time , sum(ss.ccwait_delta) concurrency_wait_time , sum(SS.apwait_delta) app_wait_time , sum(ss.plsexec_time_delta) pls_exec_time , sum(ss.javexec_time_delta) java_exec_time , sum(ss.direct_writes_delta) direct_writes , sum(ss.io_interconnect_bytes_delta) io_interconnect_bytes , sum(ss.io_offload_elig_bytes_delta) io_offload_elig_bytes , sum(ss.io_offload_return_bytes_delta) io_offload_return_bytes , sum(ss.physical_write_bytes_delta) physical_write_bytes , sum(ss.physical_read_bytes_delta) physical_read_bytes , sum(ss.optimized_physical_reads_delta) optimized_physical_reads , sum(ss.cell_uncompressed_bytes_delta) cell_uncompressed_bytes FROM v$database d JOIN dba_hist_snapshot s ON s.dbid = d.dbid JOIN dba_hist_sqlstat ss ON ss.dbid = s.dbid AND ss.instance_number = s.instance_number AND ss.snap_id = s.snap_id WHERE 1=1 AND s.end_interval_time > systimestamp - 1 GROUP BY ss.parsing_schema_name -- , TRUNC(s.end_interval_time,'HH24') ORDER BY -- physical_read_bytes desc nulls last -- physical_write_bytes desc nulls last -- direct_writes desc nulls last -- disk_reads desc nulls last -- buffer_gets desc nulls last -- optimized_physical_reads desc nulls last -- cell_uncompressed_bytes desc nulls last -- io_interconnect_bytes desc nulls last executions desc nulls last -- parse_calls desc nulls last -- elapsed_time desc nulls last -- TRUNC(s.end_interval_time,'HH24') ; ####################################### # LDAP/DNS commands ######################################## # List LDAP Servers host -t SRV _ldap._tcp ldapsearch -x -h ldapserver -s sub -b "DC=example,DC=com" cn='*namesearch_with_wildcard*' cn -x - simple authentication -h - ldaphost -b - searchbase -s - search subtree ####################################### # Find missing entry in known_hosts ######################################## for i in $(seq -f "%03g" 236 500) do ssh-keyscan server$i 
done \ | sort -u - ~/.ssh/known_hosts | diff ~/.ssh/known_hosts - ################################# # Add ssh keys between nodes ################################# expect -c ' spawn dcli -l root -g all_group -k while {1} { expect "(yes/no)?" { send "yes\n"} expect "password:" { send "welcome1\n"} } ' # print leading zero in 2 digit numbers in bash for i in $(seq -f "%03g" 236 400) do ssh-keyscan dtraflocorh$i done for i in $(seq -f "%03g" 236 400) do scp -pr .ssh dtraflocorh$i: done for i in $(seq -f "%03g" 236 400) do expect -c ' spawn scp -pr .ssh dtraflocorh${i}: while {1} { expect "(yes/no)?" { send "yes\n"} expect "password:" { send "welcome1\n"} } ' done #!/usr/bin/expect set $argv[0] [lindex $argv 0] set timeout 3 expect -c ' spawn scp -pr .ssh dtraflocorh${i}: while {1} { expect "(yes/no)?" { send "yes\n"} expect "password:" { send "welcome1\n"} } ' for i in $(seq -f "%03g" 236 400) do expect -f /tmp/a.ex done for i in $(seq -f "%03g" 236 400) do expect -c ' set HOST [lindex $argv 0] spawn echo $HOST: while {1} { expect "(yes/no)?" 
{ send "yes\n"} expect "password:" { send "Bootit1234\n"} } ' done for i in $(seq -f "%03g" 236 400) do /tmp/a.ex dtraflocorh${i} done ################################# # Switch all logfile in RAC ################################# alter system switch all logfile ; ################################# # Tnsnames.ora file read ################################# egrep -i '^[A-Z]|HOST|service' titan_se.tns |while read line do if [ `echo $line |egrep -i '^[A-Z]' |wc -l` -gt 0 ]; then TNSALIAS=$(echo $line|cut -d= -f1); fi; if [ `echo $line |egrep -i 'HOST' |wc -l` -gt 0 ]; then HOST=$(echo $line|tr -d ' '| awk -F"HOST=" '{print $2 }'|cut -d')' -f1); fi; if [ `echo $line |egrep -i 'SERVICE' |wc -l` -gt 0 ]; then SERVICE=$(echo $line|tr -d ' '| awk -F"SERVICE_NAME=" '{print $2 }'|cut -d')' -f1); echo $TNSALIAS : $HOST : $SERVICE fi; done ------------------------------- -- Top SQL by CPU ------------------------------- select --TRUNC(s.END_INTERVAL_TIME), ss.dbid, ss.sql_id, SUM(ss.executions_delta) "Execs", ROUND((SUM(ss.cpu_time_delta)/1000000 )/60) "CPU Time (minutes)" , (select sql_text from dba_hist_sqltext sql where sql.dbid = ss.dbid and sql.sql_id = ss.sql_id) sql_text from dba_hist_sqlstat ss JOIN dba_hist_snapshot s on ss.dbid = s.dbid AND ss.INSTANCE_NUMBER = s.INSTANCE_NUMBER AND ss.SNAP_ID = s.snap_id where 1=1 --and ss.sql_id = '8xcfc1knuja0s' group by --TRUNC(s.END_INTERVAL_TIME), ss.dbid, ss.sql_id order by ROUND((SUM(ss.cpu_time_delta)/1000000 )/60) desc ; ################################# # Extract data in csv format ################################# set pagesize 0 set feedback off set colsep "," set trimout on set trimspool on set linesize 32767 set echo off spool outputfile.csv <<<<<>>>>> set term on set feedback on set colsep " " set pagesize 100 set echo on ################################# # Java Sleep in milliseconds ################################# CREATE OR REPLACE PROCEDURE sleep(x_millis IN NUMBER) AS LANGUAGE JAVA NAME 
'java.lang.Thread.sleep(int)'; / ########################### # queue row count ########################### set lines 10000 set pages 10000 set feedback off spool vg.sql select 'select * from ( ' from dual union all SELECT 'SELECT ''' || owner || '.' || name || ''' as queue ' || chr(13)|| ', ' || '(select ROUND(sum(bytes)/power(1024,2)) from dba_segments s where s.owner = ''' || owner || ''' and s.segment_name = ''' || q.queue_table || ''') as "Size(MB)"' || chr(13)|| ', ' || '(select count(1) from ' || owner || '.' || queue_table || ') as row_count' || chr(13)|| ', ' || retention || ' retention from dual union all' FROM dba_queues q where q.owner like 'TRAF_%' and q.queue_type ='NORMAL_QUEUE' union all select ' select ''abc'' queue,0 "Size(MB)" , 1 rowcount,1 retention from dual' from dual union all select ') order by "Size(MB)" desc ; ' from dual ; spool off @html @./vg.sql ################################# # DST Timezone ################################# set lines 100 select version from v$timezone_file; column property_name format a30 column value format a30 select property_name, substr(property_value, 1, 30) value from database_properties where property_name like 'DST_%' order by property_name; select * from dba_tstz_tables where upgrade_in_progress = 'YES'; ################################# # IO Top (iotop) ################################# watch -n 2 'grep read_bytes /proc/*/io 2>/dev/null |sort -k2nr |head -15 ; echo ""; grep write_bytes /proc/*/io 2>/dev/null |sort -k2nr |head -15 ;' watch -n 2 'grep read_bytes /proc/*/io 2>/dev/null |sort -k2nr |head -20' watch -n 2 'grep write_bytes /proc/*/io 2>/dev/null |sort -k2nr |head -20' watch -n 2 'for stat in read_bytes write_bytes do for i in `egrep ^$stat /proc/*/io 2>/dev/null |sort -k2nr |head -10 |cut -d/ -f3`; do echo PID: $i - $stat: `egrep ^$stat /proc/$i/io` , cmd - `cat /proc/$i/cmdline`; done echo "" done ' ---------------------------------------- -- DBA Registry 
---------------------------------------- set lines 100 column comp_id format a20 column comp_name format a50 column status format a20 select comp_id, comp_name, status from dba_registry order by 1; ---------------------------------------- -- Database Options ---------------------------------------- set lines 100 column PARAMETER format a50 column value format a20 select PARAMETER, value from v$option order by 1; ################################# # SNMP Query ################################# snmpwalk -Os -c "public" -v 2c snmpdelta -Cs -CT -c "public" -v 2c orabackprd01 ipInReceives.0 ############################### # RAC Service role check ############################### export ORACLE_SID=12102 export ORAENV_ASK=NO . /usr/local/bin/oraenv for db in `srvctl config database` do export ORACLE_SID=$db . /usr/local/bin/oraenv 2>/dev/null 1>&2 echo "######################### $db #####################" for service in `srvctl config service -d $db|grep 'Service name' |cut -d: -f2|awk '{print $1}' |sort` do echo $service - `srvctl config service -d $db -s $service |grep 'Service role' |cut -d: -f2|awk '{print $1}'` done done