2026-03-12 20:23:15

This commit is contained in:
root
2026-03-12 21:23:47 +01:00
parent eab4b36eca
commit 93039b8489
3332 changed files with 699614 additions and 0 deletions

57
mhouri/AwrBulkCollect.sql Normal file
View File

@@ -0,0 +1,57 @@
-- +----------------------------------------------------------------------------+
-- | Author : Mohamed Houri |
-- |----------------------------------------------------------------------------|
-- | DATABASE : 12cR1 |
-- | Name : AwrBulkcollect.sql |
-- | PURPOSE : Dynamically create a SQL script to generate a list of AWR |
-- | reports between two snapshots dates. |
-- | NOTE : As with any code, ensure to test this script in a development |
-- | environment before attempting to run it in production. |
-- | Remarks : CHR(10) new line is mandatory. Unfortunately |
-- | SET termout off so that AWR content will not be displayed |
-- | For RAC configuration consider the instance number |
-- |              Enter input date in this format :'ddmmyyyy hh24:mi:ss'       |
-- | |
-- ------------------------------------------------------------------------------
-- Generates (via spool) a driver script collectAWRs.sql which, when executed,
-- spools one AWR text report per consecutive snapshot pair in the date range.
set termout off      -- keep the generated lines off the screen while spooling
set head off
set define off       -- no substitution variables below; any '&' in text stays literal
set linesize 120
spool collectAWRs.sql
SELECT
'spool awr_XYZ_inst_1_'
|| t.si
|| '_'
|| t.se
|| '.text '
|| CHR(10)
-- || ' alter session set nls_language=''AMERICAN'';'
-- || CHR(13)
|| 'SELECT * FROM TABLE(dbms_workload_repository.awr_report_text('
|| t.dbid
|| ','
|| t.instance_number
|| ','
|| t.si
|| ','
|| t.se
|| '));'
|| CHR(10)
|| ' spool off;'
FROM
(
-- one row per snapshot: si = begin snapshot, se = si + 1 = end snapshot
SELECT
dbid,
snap_id si,
snap_id + 1 se,
instance_number
FROM
dba_hist_snapshot
WHERE
begin_interval_time > TO_DATE('28102019 04:00:00', 'ddmmyyyy hh24:mi:ss')
AND end_interval_time <= TO_DATE('28102019 12:30:00', 'ddmmyyyy hh24:mi:ss')
AND instance_number = 1    -- single-instance; adapt for RAC (see header note)
) t;
spool off;

View File

@@ -0,0 +1,86 @@
--************************************************************************
-- Name : CurSelCubeHybridNonPop
-- Date : October 2017
-- Author : Mohamed Houri
-- Purpose: gives the selectivity low and high value range of a Hybrid
-- non-popular histogram bind variable having an endpoint number
-- when this bind variable is used in a bind aware cursor
--***********************************************************************
var num_rows number
var new_density number
begin
select num_rows into :num_rows from all_tables where table_name = upper ('&table_name');
end;
/
begin
SELECT
trunc(((BktCnt-PopBktCnt)/BktCnt)/(NDV-PopValCnt),10)
into :new_density
FROM
(SELECT
COUNT(1) PopValCnt,
SUM(endpoint_repeat_count) PopBktCnt,
ndv,
BktCnt,
pop_bucketSize
FROM
(SELECT
(sample_size - num_nulls) BktCnt,
num_distinct ndv,
num_buckets,
density OldDensity,
(sample_size-num_nulls)/num_buckets pop_bucketSize
FROM user_tab_col_statistics
WHERE
table_name = upper ('&table_name')
AND column_name = upper ('&column_name')
),
user_histograms
WHERE table_name = upper ('&table_name')
AND column_name = upper ('&column_name')
AND endpoint_repeat_count> pop_bucketSize
GROUP BY ndv,
BktCnt,
pop_bucketSize
);
end;
/
col bind format a10
select
bind
,round((sel_of_bind - offset),6) low
,round((sel_of_bind + offset),6) high
from
(select
bind
,value_count/:num_rows sel_of_bind
,0.1*(value_count/:num_rows) offset
from
(select
endpoint_actual_value bind
,(:num_rows*greatest(:new_density,endpoint_repeat_count/sample_size)) value_count
from
(select
sample_size
,endpoint_actual_value
,endpoint_repeat_count
from (select
ucs.sample_size
,uth.endpoint_actual_value
,uth.endpoint_repeat_count
from
user_tab_histograms uth
,user_tab_col_statistics ucs
where
uth.table_name = ucs.table_name
and uth.column_name = ucs.column_name
and uth.table_name = upper ('&table_name')
and uth.column_name = upper ('&column_name')
)
)
)
)
where bind = &bind;

View File

@@ -0,0 +1,67 @@
--*******************************************************************
-- Name : CurSelCubeHybridNonPopWithoutEndPoint
-- Date : October 2017
-- Author : Mohamed Houri
-- Purpose: gives the selectivity low and high value range of a Hybrid
-- non-popular histogram bind variable which has not been
-- captured by the histogram gathering program.
-- This low-high value range corresponds to the low-high
-- selectivity range of a bind aware cursor using this
-- bind variable value
--*****************************************************************|
var num_rows number
var new_density number
begin
select num_rows into :num_rows from all_tables where table_name = 'ACS_TEST_TAB';
end;
/
begin
SELECT
trunc(((BktCnt-PopBktCnt)/BktCnt)/(NDV-PopValCnt),10)
into :new_density
FROM
(SELECT
COUNT(1) PopValCnt,
SUM(endpoint_repeat_count) PopBktCnt,
ndv,
BktCnt,
pop_bucketSize
FROM
(SELECT
(sample_size - num_nulls) BktCnt,
num_distinct ndv,
num_buckets,
density OldDensity,
(sample_size-num_nulls)/num_buckets pop_bucketSize
FROM user_tab_col_statistics
WHERE
table_name = 'ACS_TEST_TAB'
AND column_name = 'RECORD_TYPE'
),
user_histograms
WHERE table_name = 'ACS_TEST_TAB'
AND column_name = 'RECORD_TYPE'
AND endpoint_repeat_count> pop_bucketSize
GROUP BY ndv,
BktCnt,
pop_bucketSize
);
end;
/
col bind format a10
select
&&bind
,round((sel_of_bind - offset),6) low
,round((sel_of_bind + offset),6) high
from
(select
&bind
,:new_density sel_of_bind
,0.1*(:new_density) offset
from dual
);

View File

@@ -0,0 +1,49 @@
--************************************************************************
-- Name : CurSelCubeHybridNonPop
-- Date : October 2017
-- Author : Mohamed Houri
-- Purpose: gives the selectivity low and high value range of a Hybrid
-- non-popular histogram bind variable having an endpoint number
-- when this bind variable is used in a bind aware cursor
--***********************************************************************
var num_rows number
var new_density number
begin
select num_rows into :num_rows from all_tables where table_name = upper ('&table_name');
end;
/
begin
SELECT
trunc(((BktCnt-PopBktCnt)/BktCnt)/(NDV-PopValCnt),10)
into :new_density
FROM
(SELECT
COUNT(1) PopValCnt,
SUM(endpoint_repeat_count) PopBktCnt,
ndv,
BktCnt,
pop_bucketSize
FROM
(SELECT
(sample_size - num_nulls) BktCnt,
num_distinct ndv,
num_buckets,
density OldDensity,
(sample_size-num_nulls)/num_buckets pop_bucketSize
FROM user_tab_col_statistics
WHERE
table_name = upper ('&table_name')
AND column_name = upper ('&column_name')
),
user_histograms
WHERE table_name = upper ('&table_name')
AND column_name = upper ('&column_name')
AND endpoint_repeat_count> pop_bucketSize
GROUP BY ndv,
BktCnt,
pop_bucketSize
);
end;
/

98
mhouri/CurrentSize.sql Normal file
View File

@@ -0,0 +1,98 @@
/* ----------------------------------------------------
| Author : Aziz Cherrabi |
| Updated : 24/08/2015 |
| Get size of the existing tablespace |
| except TEMP |
| |
|-----------------------------------------------------| */
column ts format a35 heading 'TABLESPACE'
column tst format a9 heading 'STATUS'
column vt format 99999999990 heading 'TOTAL|SPACE|(Mo)'
column vo format 99999999990 heading 'SPACE|USED|(Mo)'
column vr format 99999999990 heading 'SPACE|REMAINED|(Mo)'
column tx format 990 heading '%USED'
compute sum label 'Total tablespaces' of vt vo vr on report
break on report
with got_my_max
as (select
tablespace_name tbs,
Bytes_G,
maxbytes_G,
to_max_G,
case when maxbytes_G=0 then -1 else round((Bytes_G*100/maxbytes_G)) end pct
from
(
select
tablespace_name,
round(sum(nvl(BYTES,0))/1024/1024/1024) Bytes_G,
round(sum(nvl(MAXBYTES,0))/1024/1024/1024) maxbytes_G,
round(sum(nvl(MAXBYTES-BYTES,0))/1024/1024/1024) to_max_G
from (select
tablespace_name
,file_name,bytes
,case when nvl(maxbytes,0)=0 then bytes
else nvl(maxbytes,0) end MAXBYTES
from dba_data_files)
group by tablespace_name
)
)
select
t.tablespace_name
||decode(t.contents,'TEMPORARY',' (TEMPORARY/'||b.file_type||')','') ts,
t.status tst,
b.bytes/1024/1024 vt,
b.bytes_used/1024/1024 vo,
b.bytes_free/1024/1024 vr,
-- ceil(b.bytes_used*100/b.bytes) tx,
ceil(b.bytes_used*100/(b.bytes+(g.to_max_G*1024*1024*1024))) tx,
g.to_max_G to_max_G
from (
select
df.tablespace_name tablespace_name,
df.bytes bytes,
nvl(u.bytes_used,0) bytes_used,
nvl(f.bytes_free,0) bytes_free,
'DATAFILE' file_type
from
(select
tablespace_name,
sum(bytes) bytes
from
dba_data_files
group by
tablespace_name
) df,
(select
tablespace_name,
sum(bytes) bytes_used
from
dba_segments
group by
tablespace_name
) u,
(select
tablespace_name,
sum(bytes) bytes_free
from
dba_free_space
group by
tablespace_name
) f
where
df.tablespace_name = u.tablespace_name (+)
and
df.tablespace_name = f.tablespace_name (+)
) b,
dba_tablespaces t,
got_my_max g
where
t.tablespace_name = b.tablespace_name
and g.tbs = b.tablespace_name
order by tx desc, vo desc
;

36
mhouri/FindVW_all.sql Normal file
View File

@@ -0,0 +1,36 @@
/* ------------------------------------------------------------------------------------|
|Author : Mohamed Houri |
|Date : 03/07/2017 |
|Scope : gives all sql_id in memory and from AWR history using all VW_ transformation |
| |
---------------------------------------------------------------------------------------|*/
--
col sql_id format a15
col plan_hash_value format 999999999999
col object_name format a25
break on report
-- UNION (not UNION ALL) is deliberate: it de-duplicates statements that are
-- present both in the cursor cache (gv$sql_plan) and in AWR (dba_hist_sql_plan).
-- NOTE(review): '%VW%' also matches any object whose name merely contains
-- "VW", not only optimizer-generated VW_% views - confirm this is intended.
select
*
from
( select
sql_id
,plan_hash_value
,object_name
,cardinality
from
gv$sql_plan
where
object_name like '%VW%'
union
select
sql_id
,plan_hash_value
,object_name
,cardinality
from
dba_hist_sql_plan
where
object_name like '%VW%'
)
order by sql_id;

View File

@@ -0,0 +1,69 @@
rem
rem Script: FixProfileXmlFromCache.sql
rem Author: Original taken somewhere (Kerry Osborne or Carlos Sierra Or Aziz Cherrabi)
rem and updated by Mohamed Houri to consider hints > 500 bytes
rem and avoid ORA-06502
rem Dated: September 2016
rem Purpose: Transfer an execution plan of (hinted_sql_id) to a packaged query
rem
rem
rem Last tested
rem 12.1.0.2
rem
rem Usage:
rem SQL> @fixProfilefromCache
rem Enter value for sql_id_from: 2w9a295mxcjgx
rem Enter value for child_no_from: 0
rem Enter value for sql_id_to: addzft9frsckw
rem Enter value for sql_id_to: addzft9frsckw
rem Enter value for sql_id_to: addzft9frsckw
rem Notes : any feedback will be highly appreciated
rem
declare
ar_profile_xml clob;
cl_sql_text clob;
begin
-- get sql_id_from information
--
select
regexp_replace(other_xml,'.*(<outline_data>.*</outline_data>).*','\1')
into ar_profile_xml
from
gv$sql_plan
where
sql_id = '&sql_id_from'
and child_number = &child_no_from
and other_xml is not null
and rownum =1;
-- get information of sql_id_to
-- use gv$sql instead of g$sqlstats
-- to avoid query text being truncated when it is very big
begin
select
sql_fulltext into cl_sql_text
from
gv$sql
where
sql_id = '&sql_id_to';
exception
when NO_DATA_FOUND then
select
sql_text into cl_sql_text
from
dba_hist_sqltext
where
sql_id = '&sql_id_to'
and dbid = (select dbid from v$database);
end;
-- fix Profile
dbms_sqltune.import_sql_profile(
sql_text => cl_sql_text ,
profile_xml => ar_profile_xml ,
name => 'profile_'||'&&sql_id_to'||'_attach' ,
category => 'DEFAULT' ,
replace => true ,
force_match => TRUE
);
end;
/

22
mhouri/PXElaps.sql Normal file
View File

@@ -0,0 +1,22 @@
/* -----------------------------------------------------------------------------|
|Author : Mohamed Houri |
|Date : 03/07/2017 |
|Scope : gives time consumed by all PX slaves during a parallel query |
| |
--------------------------------------------------------------------------------|*/
-- Elapsed seconds consumed by each PX slave of one parallel execution,
-- with a report-level total of all slave time.
compute sum label 'Total Slaves time' of elapsed on report
break on report
select sql_id,
       sql_exec_id,
       sid,
       process_name,
       round(elapsed_time/1e6,2) elapsed
from   gv$sql_monitor
where  sql_id      = '&sql_id'
and    sql_exec_id = '&exec_id'
and    sql_text is null              -- slave rows only; the QC row carries the sql_text
order by round(elapsed_time/1e6,2);

View File

@@ -0,0 +1,324 @@
rem
rem Script: index_est_proc_2.sql
rem Author: Jonathan Lewis
rem Dated: August 2005 (updated Apr 2009)
rem Purpose: Fast analysis of indexes to help identify
rem extreme degeneration.
rem
rem Last tested
rem 11.1.0.7
rem 10.2.0.3
rem 10.1.0.4
rem 9.2.0.8
rem 8.1.7.4
rem Not tested
rem 11.2.0.1
rem 10.2.0.4
rem
rem Usage:
rem Set the values in the "define" section
rem Log on with the privilege to see the "dba_" views
rem using SQL*Plus and run the script.
rem
rem Notes:
rem This script assumes that statistics have been collected in
rem the fairly recent past, and uses some approximations to
rem compare the number of leaf blocks with the number of leaf
rem blocks that ought to be needed to hold the data.
rem
rem There are various little oddities with the way that
rem	(a) Oracle calculates average column length and
rem (b) I use the available data
rem that mean that at small sizes and in extreme cases the
rem numbers I produce can be wrong. In particular, for indexes
rem where a lot of the table data has nulls (so no entry in the
rem index), the estimated size can be significantly larger than
rem they finally turn out to be.
rem
rem
rem Targets
rem =======
rem Where the estimate is very much smaller than the actual, then
rem you may be looking at a "FIFO" index, emptying out in the past
rem and filling in the future. This type of index is a candidate for
rem a regular "coalesce" - although you may want to rebuild it once
rem to get it to the right starting size and release excess space
rem back to the tablespace.
rem
rem See https://jonathanlewis.wordpress.com/2008/09/26/index-analysis/
rem for an example and discussion on this type of index.
rem
rem Where the estimate is about half the size of the actual, then
rem it is worth checking whether there is any special treatment of
rem the data that is making this happen. 50% utilisation is fairly
rem common in RAC for indexes based on a sequence with a large cache
rem size, so it may be best to leave the indexes at that level.
rem However, you may find that rebuilding (perhaps just once) with
rem a pctfree in the region of 30% may give you a slightly more efficient
rem index in non-RAC systems.
rem
rem If your index is running at 50% and is not strongly sequence based
rem then you may be suffering from the concurrency/ITL bug and may want
rem to rebuild the index and force a maxtrans setting into the index.
rem
rem If the index is running at a fairly uniform 25%, it may be subject
rem to side effects of both sequencing and the concurrency effects.
rem
rem Usage:
rem ======
rem This script takes a username (table owner), percent usage, and
rem scaling factor. It reports the estimated leaf block count of
rem all simple indexes for that schema where the size of the index
rem would be smaller than the supplied fraction of the current size
rem when rebuilt at the supplied percentage utilisation. Current settings
rem are 90% (which equates to the default pctfree 10) and 0.6 which means
rem the index would be running at about 50% empty wastage - which is the
rem point at which it begins to be a possible target for investigation.
rem The script does not report any index smaller than 10,000 leaf blocks,
rem and assumes an 8KB block size.
rem
rem Technical notes:
rem ================
rem Don't need to add a length byte after using dbms_stats
rem Don't need a 'descending' byte because it's automatically included
rem Don't need to adjust for num_nulls because it's automatically included
rem Reverse key indexes don't affect column lengths
rem
rem Need to worry about COMPRESSED indexes. At present compression
rem may reduce the size of an index so that I don't notice it should
rem still be smaller than it is.
rem
rem Index types that can be used (with partitioned = 'NO')
rem NORMAL
rem NORMAL/REV
rem FUNCTION-BASED NORMAL
rem
rem Still needs enhancing for partitioned and subpartitioned indexes
rem Check dba_part_indexes for locality, partitioning_type, subpartitioning_type
rem But does handle global indexes on partitioned tables.
rem
rem To investigate
rem LOB
rem IOT - TOP
rem IOT - NESTED
rem SECONDARY
rem BITMAP (and BITMAP JOIN)
rem FUNCTION-BASED BITMAP
rem CLUSTER
rem ANSI ?
rem
rem Probably not possible
rem DOMAIN
rem FUNCTION-BASED DOMAIN
rem
rem Need to avoid partitioned, temporary, unusable and dropped indexes
rem
rem
rem Update : December 2016 by Mohamed Houri to consider Locally
rem Partitioned and SUB-Partitioned indexes
rem          : Any error in this script is mine.
rem : This script has been tested on running systems and seems giving
rem acceptable results. But still I have not enough feedback. So use it
rem carefully.
rem
set serveroutput on size 1000000 format wrapped
define m_owner = '&m_schemaname'
define m_blocksize = 8192
define m_target_use = 90 -- equates to pctfree 10
define m_scale_factor = 0.6
define m_minimum = 1000
define m_overhead = 192 -- leaf block "lost" space in index_stats
set verify off
set serveroutput on size 1000000 format wrapped
declare
m_leaf_estimate number;
ln_size number;
begin
for r in
(
select
ww.table_owner,
ww.table_name,
ww.index_owner,
ww.index_name,
ww.partition_name,
ww.leaf_blocks,
ww.status,
ww.part_level
from -- top level partitioned index
(
select
a.table_owner,
a.table_name,
b.index_owner,
b.index_name,
b.partition_name,
b.leaf_blocks,
b.status,
'TOP' part_level
from
dba_indexes a
,dba_ind_partitions b
where
a.owner = b.index_owner
and a.index_name = b.index_name
and a.owner = upper('&m_owner')
and a.partitioned = 'YES'
and a.temporary = 'N'
and a.dropped = 'NO'
and b.status != 'UNUSABLE'
and a.last_analyzed is not null
union all
-- sub partitioned indexes
select
a.table_owner,
a.table_name,
c.index_owner,
c.index_name,
c.subpartition_name,
c.leaf_blocks,
c.status,
'SUB' part_level
from
dba_indexes a
,dba_ind_subpartitions c
where
a.owner = c.index_owner
and a.index_name = c.index_name
and a.owner = upper('&m_owner')
and a.partitioned = 'YES'
and a.temporary = 'N'
and a.dropped = 'NO'
and c.status != 'UNUSABLE'
and a.last_analyzed is not null
)ww
order by
ww.index_owner, ww.table_name, ww.index_name,ww.partition_name
) loop
if r.leaf_blocks > &m_minimum then
select
round( 100 / &m_target_use * (ind.num_rows * (tab.rowid_length + ind.uniq_ind + 4) + sum((tc.avg_col_len) *(tab.num_rows)) ) /(&m_blocksize - &m_overhead)
) index_leaf_estimate
into m_leaf_estimate
from
(
select /*+ no_merge */
a.partition_name,
a.num_rows,
10 rowid_length
from
dba_ind_partitions a
where
a.index_owner = r.index_owner
and a.index_name = r.index_name
and a.partition_name = r.partition_name
union all
select /*+ no_merge */
a.subpartition_name,
a.num_rows,
10 rowid_length
from
dba_ind_subpartitions a
where
a.index_owner = r.index_owner
and a.index_name = r.index_name
and a.subpartition_name = r.partition_name
) tab,
(
select /*+ no_merge */
a.index_name,
a.num_rows,
decode(uniqueness,'UNIQUE',0,1) uniq_ind
from
dba_ind_partitions a
,dba_indexes b
where
a.index_name = b.index_name
and a.index_owner = r.index_owner
and b.table_name = r.table_name
and b.owner = r.index_owner
and a.index_name = r.index_name
and a.partition_name = r.partition_name
union all
select /*+ no_merge */
c.index_name,
c.num_rows,
decode(uniqueness,'UNIQUE',0,1) uniq_ind
from
dba_ind_subpartitions c
,dba_indexes b
where
c.index_name = b.index_name
and c.index_owner = r.index_owner
and b.table_name = r.table_name
and b.owner = r.index_owner
and c.index_name = r.index_name
and c.subpartition_name = r.partition_name
) ind,
(
select /*+ no_merge */
column_name
from
dba_ind_columns
where
table_owner = r.table_owner
and index_owner = r.index_owner
and table_name = r.table_name
and index_name = r.index_name
) ic,
(
select /*+ no_merge */
column_name,
avg_col_len
from
dba_tab_cols
where
owner = r.table_owner
and table_name = r.table_name
) tc
where
tc.column_name = ic.column_name
group by
ind.num_rows,
ind.uniq_ind,
tab.rowid_length
;
if m_leaf_estimate < &m_scale_factor * r.leaf_blocks then
select sum(round(bytes/1024/1024,2)) into ln_size
from dba_segments a
where segment_name = r.index_name
and partition_name = r.partition_name;
if ln_size is not null
then
dbms_output.new_line;
dbms_output.put_line
(
'table name ' || ': ' || trim(r.table_name) || ' - ' ||
'index name ' || ': ' || trim(r.index_name) ||
' - Partition Name :' || trim(r.partition_name) ||
' - Partition Level:' || r.part_level
);
dbms_output.put_line
(
'--> Current partition index size (MB): ' ||
round(ln_size,2) ||
' Expected Partition index size (MB): ' ||
round(ln_size * (m_leaf_estimate/r.leaf_blocks),2)
);
dbms_output.new_line;
end if;
end if;
end if;
end loop;
end;
/
set verify on

13
mhouri/QCElaps.sql Normal file
View File

@@ -0,0 +1,13 @@
-- Query-coordinator statistics for one execution of a parallel statement
-- (the QC row in gv$sql_monitor is the one carrying the statement text).
select sql_id,
       round(elapsed_time/1e6,2)    QC_time,
       px_maxdop                    DOP,
       px_servers_requested,
       px_servers_allocated,
       round(cpu_time/1e6)          cpu,
       round(user_io_wait_time/1e6) IO_Waits
from   gv$sql_monitor
where  sql_id      = '&sql_id'
and    sql_exec_id = '&exec_id'
and    sql_text is not null;

24
mhouri/QcPxElaps.sql Normal file
View File

@@ -0,0 +1,24 @@
-- Wall-clock time per row (PX slaves) of one parallel execution, with a
-- report-level total labelled 'Query time'.
compute sum label 'Query time' of wall_clock_time on report
break on report
-- Fix: the original prompted for two differently named variables
-- (&sql_exec_id in the CTE, &exec_id in both branches) for the same value;
-- a mismatched answer silently produced wrong/empty results. Using '&&'
-- defines each value once and reuses it everywhere.
with px as (select max(px_maxdop) px_maxdop
            from gv$sql_monitor
            where sql_id      = '&&sql_id'
            and   sql_exec_id = '&&sql_exec_id')
select
 sql_id
,round(elapsed_time/1e6,2) wall_clock_time
,px_maxdop
from gv$sql_monitor
where sql_id      = '&&sql_id'
and   sql_exec_id = '&&sql_exec_id'
and   sql_text is null        -- PX slave rows
union all
select
 sql_id
,round(elapsed_time/1e6,2) wall_clock_time
,null
from gv$sql_monitor, px
where sql_id      = '&&sql_id'
and   sql_exec_id = '&&sql_exec_id'
-- NOTE(review): this branch repeats the slave rows (same filter as above);
-- it probably should select the QC row (sql_text is NOT null) - confirm.
and   sql_text is null
order by round(elapsed_time/1e6,2);
undefine sql_id
undefine sql_exec_id

1
mhouri/README.md Normal file
View File

@@ -0,0 +1 @@
I share some SQL scripts that I use regularly in my work as a consultant specialized around the Oracle database

90
mhouri/SizeCurrent2.sql Normal file
View File

@@ -0,0 +1,90 @@
column ts format a35 heading 'TABLESPACE'
column tst format a9 heading 'STATUS'
column vt format 99999999990 heading 'TOTAL|SPACE|(GB)'
column vo format 99999999990 heading 'SPACE|USED|(GB)'
column vr format 99999999990 heading 'SPACE|REMAINED|(GB)'
column tx format 990 heading '%USED'
compute sum label 'Total tablespaces' of vt vo vr on report
break on report
with got_my_max
as (select
tablespace_name tbs,
Bytes_G,
maxbytes_G,
to_max_G,
case when maxbytes_G=0 then -1 else round((Bytes_G*100/maxbytes_G)) end pct
from
(
select
tablespace_name,
round(sum(nvl(BYTES,0))/1024/1024/1024) Bytes_G,
round(sum(nvl(MAXBYTES,0))/1024/1024/1024) maxbytes_G,
round(sum(nvl(MAXBYTES-BYTES,0))/1024/1024/1024) to_max_G
from (select
tablespace_name
,file_name,bytes
,case when nvl(maxbytes,0)=0 then bytes
else nvl(maxbytes,0) end MAXBYTES
from dba_data_files)
group by tablespace_name
)
)
select
t.tablespace_name
||decode(t.contents,'TEMPORARY',' (TEMPORARY/'||b.file_type||')','') ts,
t.status tst,
b.bytes/1024/1024/1024 vt,
b.bytes_used/1024/1024/1024 vo,
b.bytes_free/1024/1024/1024 vr,
-- ceil(b.bytes_used*100/b.bytes) tx,
ceil(b.bytes_used*100/(b.bytes+(g.to_max_G*1024*1024*1024))) tx,
g.to_max_G to_max_G
from (
select
df.tablespace_name tablespace_name,
df.bytes bytes,
nvl(u.bytes_used,0) bytes_used,
nvl(f.bytes_free,0) bytes_free,
'DATAFILE' file_type
from
(select
tablespace_name,
sum(bytes) bytes
from
dba_data_files
group by
tablespace_name
) df,
(select
tablespace_name,
sum(bytes) bytes_used
from
dba_segments
group by
tablespace_name
) u,
(select
tablespace_name,
sum(bytes) bytes_free
from
dba_free_space
group by
tablespace_name
) f
where
df.tablespace_name = u.tablespace_name (+)
and
df.tablespace_name = f.tablespace_name (+)
) b,
dba_tablespaces t,
got_my_max g
where
t.tablespace_name = b.tablespace_name
and g.tbs = b.tablespace_name
order by tx desc, vo desc
;

24
mhouri/SpmTranfer.sql Normal file
View File

@@ -0,0 +1,24 @@
-- Transfer the execution plan of a modified (e.g. hinted) statement to the
-- original statement by loading it as a SQL plan baseline (DBMS_SPM).
-- NOTE: run with "set serveroutput on" to see the 'Plans Loaded' message.
declare
v_sql_text clob;     -- full text of the ORIGINAL statement
ln_plans pls_integer;  -- number of baselines loaded
begin
-- chr(0) is stripped because embedded NUL characters in sql_fulltext
-- would corrupt the stored baseline text
select replace(sql_fulltext, chr(00), ' ')
into v_sql_text
from gv$sqlarea
where sql_id = trim('&original_sql_id')
and rownum = 1;
-- create sql_plan_baseline for original sql using plan from modified sql
ln_plans := dbms_spm.load_plans_from_cursor_cache (
sql_id => trim('&modified_sql_id'),
plan_hash_value => to_number(trim('&plan_hash_value')),
sql_text => v_sql_text );
dbms_output.put_line('Plans Loaded: '||ln_plans);
end;
/

69
mhouri/UndoHist.sql Normal file
View File

@@ -0,0 +1,69 @@
----- ./MohamedUndoHist.sql ------------------------
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Author : Mohamed Houri
Date : 02/03/2020
Scope : comments to be added here
: check historical undo, particularly ORA-01555
        : input dates have to be changed as substitution parameters
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
col snap_begin format a25
col maxquerysqlid format a15
col maxquerylen format 9999999
col txncount format 9999999
col ora_01555 format 99
col undoblks format 9999999
col undoConsump format 9999999
col tuned_undoretention format 9999999
col activeblks format 9999999
col unexpiredblks format 9999999
col expiredblks format 9999999
compute sum label 'Total Unexpired' of unexpiredblks on report
break on report
SELECT
snap_begin
,maxquerysqlid
,maxquerylen
,txncount
,unxpstealcnt
,unxpblkrelcnt
,unxpblkreucnt
,expstealcnt
,expblkrelcnt
,expblkreucnt
,nospaceerrcnt
,ssolderrcnt ora_01555
,round(undoblks * 8 / 1024) undo_mb
,tuned_undoretention
,activeblks
,unexpiredblks
,expiredblks
FROM
(SELECT
sn.begin_interval_time snap_begin
,sn.instance_number inst
,st.maxquerylen
,st.maxquerysqlid
,st.unxpstealcnt
,st.unxpblkrelcnt
,st.unxpblkreucnt
,st.expstealcnt
,st.expblkrelcnt
,st.expblkreucnt
,st.ssolderrcnt
,st.nospaceerrcnt
,st.txncount
,st.undoblks
,st.tuned_undoretention
,st.activeblks
,st.unexpiredblks
,st.expiredblks
FROM
dba_hist_undostat st,
dba_hist_snapshot sn
WHERE st.snap_id = sn.snap_id
AND begin_interval_time between to_date('06032020 04:00:00','ddmmyyyy hh24:mi:ss')
and to_date('09032020 07:00:00','ddmmyyyy hh24:mi:ss')
)
ORDER by 1 asc, 3 desc;

17
mhouri/ashPerAction.sql Normal file
View File

@@ -0,0 +1,17 @@
----- ./ashPerAction2.sql ------------------------
column cnt format 9999999
compute sum label 'Total Elapsed' of cnt on report
break on report
select
sql_id, sql_plan_hash_value, count(1) cnt
from
gv$active_session_history
where
sample_time between to_date('&datefrom', 'mm/dd/yyyy hh24:mi:ss')
and to_date('&dateto', 'mm/dd/yyyy hh24:mi:ss')
and action = '&ACTION'
group by sql_id, sql_plan_hash_value
order by 2 desc
;

16
mhouri/ashPlanLineId.sql Normal file
View File

@@ -0,0 +1,16 @@
column cnt format 9999999
compute sum label 'Total Elapsed' of cnt on report
break on report
select
sql_plan_line_id
,count(1) cnt
from
gv$active_session_history
where
sample_time between to_date('&datefrom', 'mm/dd/yyyy hh24:mi:ss')
and to_date('&dateto', 'mm/dd/yyyy hh24:mi:ss')
and sql_id = '&sql_id'
group by sql_plan_line_id
order by 2 desc;

17
mhouri/ashPlanLineId2.sql Normal file
View File

@@ -0,0 +1,17 @@
column cnt format 9999999
compute sum label 'Total Elapsed' of cnt on report
break on report
select
sql_plan_line_id
,count(1) cnt
from
gv$active_session_history
where
sample_time between to_date('&datefrom', 'mm/dd/yyyy hh24:mi:ss')
and to_date('&dateto', 'mm/dd/yyyy hh24:mi:ss')
and sql_id = '&sql_id'
and sql_exec_id = &sql_exec_id
group by sql_plan_line_id
order by 2 desc;

47
mhouri/ashgetBreak.sql Normal file
View File

@@ -0,0 +1,47 @@
--|-----------------------------------------------------------|
--|Author : Mhouri |
--|scope  : get origin of the SQL*Net break/reset to client |
--| |
--|-----------------------------------------------------------|
-- Column formats: one per displayed character column.
-- Fix: the original declared "col osuser for a20" three times (copy/paste);
-- the sess_user and proc_user columns were evidently the intended targets.
col top_level_call for a25
col osuser         for a20
col sess_user      for a20
col proc_user      for a20
-- Sessions that spent a significant amount of ASH samples on
-- 'SQL*Net break/reset to client' in the (hard-coded) time window.
-- NOTE(review): consider parameterizing the dates as &date_from/&date_to
-- like the sibling ash* scripts.
with got_my_sid
as(select
session_id
,inst_id
,count(1) cnt
from
gv$active_session_history
where
sample_time between to_date('10072015 11:00:00', 'ddmmyyyy hh24:mi:ss')
and to_date('10072015 12:00:00', 'ddmmyyyy hh24:mi:ss')
and event = 'SQL*Net break/reset to client'
group by session_id, inst_id
having count(1) > 10      -- keep only sessions with more than 10 samples on the event
)
select
a.inst_id
,a.session_id
,a.cnt elaps              -- ASH sample count spent on the event
,s.schema#
,s.schemaname
,s.top_level_call#
,(select l.top_level_call_name from v$toplevelcall l
where l.top_level_call# = s.top_level_call#
) top_level_call
,s.osuser
,s.username sess_user
,p.username proc_user
,p.tracefile
from
gv$process p
,gv$session s
,got_my_sid a
where
p.addr = s.paddr
and a.session_id = s.sid
and a.inst_id = s.inst_id;

32
mhouri/ashtop20.sql Normal file
View File

@@ -0,0 +1,32 @@
--|-----------------------------------------------------------|
--|Scope : display awr top events and sql |
--|Usage  : @ashtop20  "16032021 08:00:00" "16032021 09:00:00"|
--|-----------------------------------------------------------|
set feed off
define from_mmddyy="&1"
define to_mmddyy="&2"
clear break
select decode(event,null, 'on cpu', event) event, count(1)
from gv$active_session_history
where
sample_time between to_date('&from_mmddyy', 'ddmmyyyy hh24:mi:ss')
and to_date('&to_mmddyy', 'ddmmyyyy hh24:mi:ss')
group by event
order by 2 desc
fetch first 20 rows only;
select sql_id, count(1)
from gv$active_session_history
where
sample_time between to_date('&from_mmddyy', 'ddmmyyyy hh24:mi:ss')
and to_date('&to_mmddyy', 'ddmmyyyy hh24:mi:ss')
group by sql_id
order by 2 desc
fetch first 20 rows only;
undefine from_mmddyy
undefine to_mmddyy

19
mhouri/awrPlanLineId.sql Normal file
View File

@@ -0,0 +1,19 @@
-- Approximate DB time spent on each execution-plan line of a given sql_id,
-- taken from the AWR ASH history.
column cnt format 9999
compute sum label 'Total Elapsed' of cnt on report
break on report
select
sql_plan_line_id
,10* count(1) cnt    -- dba_hist_active_sess_history keeps 1-in-10 ASH samples, hence x10 -- TODO confirm interval on this DB
from
dba_hist_active_sess_history
where
sample_time between to_date('&datefrom', 'mm/dd/yyyy hh24:mi:ss')
and to_date('&dateto', 'mm/dd/yyyy hh24:mi:ss')
and sql_id = '&sql_id'
group by sql_plan_line_id
order by 2 desc;

28
mhouri/checkUndoAsh.sql Normal file
View File

@@ -0,0 +1,28 @@
/* --------------------------------------------------------------
Author : Mohamed Houri
Date : 24/08/2015
If you want to know what object are read
in ASH then use the following script
Particularly :
-- if current_obj = 0 then this means you are reading from
undo block(useful to check read consistency)
-- if current_obj = -1 then this means you are working on cpu
----------------------------------------------------------------------- */
-- Classify where ASH samples of a given sql_id's 'db file sequential read'
-- waits landed: undo blocks (read consistency), CPU, or a real object id.
-- Bug fix: the upper BETWEEN bound previously reused &date_from, so the
-- range collapsed to a single instant and returned (almost) no rows.
select
decode(current_obj#
,0
,'undo block'      -- current_obj# = 0 : reading undo (read-consistency work)
,-1
,'cpu'             -- current_obj# = -1: sample taken while on CPU
,current_obj#) cur_obj
, count(1)
from
gv$active_session_history
where
sample_time between to_date('&date_from', 'ddmmyyyy hh24:mi:ss')
and to_date('&date_to', 'ddmmyyyy hh24:mi:ss')
and event = 'db file sequential read'
and sql_id = '&sql_id'
group by current_obj#
order by 2 asc;

2
mhouri/disProf.sql Normal file
View File

@@ -0,0 +1,2 @@
--Mhouri
EXEC DBMS_SQLTUNE.ALTER_SQL_PROFILE('&PROFILE_NAME','STATUS','DISABLED');

11
mhouri/disSPM.sql Normal file
View File

@@ -0,0 +1,11 @@
DECLARE
l_plans_altered PLS_INTEGER;
BEGIN
l_plans_altered := DBMS_SPM.alter_sql_plan_baseline(
sql_handle => '&SQL_HANDLE',
plan_name => '&plan_name',
attribute_name => 'enabled',
attribute_value => 'NO');
END;
/

7
mhouri/dropSPM.sql Normal file
View File

@@ -0,0 +1,7 @@
declare
spm_op pls_integer;
begin
spm_op := dbms_spm.drop_sql_plan_baseline (sql_handle => NULL, plan_name => '&plan_name');
end;
/

11
mhouri/enableCons.sql Normal file
View File

@@ -0,0 +1,11 @@
-- Enabling constraint using parallelism
--Mhouri
alter table table_name add constraint constraint_name primary key (col1, col2, col3)
using index
enable novalidate;
alter session force parallel ddl;
alter table table_name parallel 8;
alter table table_name modify constraint constraint_name validate;
alter table table_name noparallel;
alter session disable parallel ddl;

View File

@@ -0,0 +1,10 @@
col task_name for a50
compute sum label 'Total Size' of MB on report
break on report
select
task_name, count(1) cnt
from
dba_advisor_objects
group by
task_name
order by 2 desc;

1
mhouri/getAdvisors.sql Normal file
View File

@@ -0,0 +1 @@
select task_name, enabled from dba_autotask_schedule_control;

24
mhouri/getFRA.sql Normal file
View File

@@ -0,0 +1,24 @@
-- Fast Recovery Area health check: archivelog status, flashback retention
-- target, overall FRA limit vs. usage, and per-file-type consumption.
archive log list
show parameter db_flashback_retention_target
col db_recovery_file_dest for a45
col space_limit_GB for 9999999
col space_used_GB for 9999999
-- overall FRA destination, limit and usage (GB)
select
name as db_recovery_file_dest
,space_limit/power(1024,3) space_limit_GB
,space_used/power(1024,3) space_used_GB
,number_of_files
from
v$recovery_file_dest;
-- breakdown per file type, including reclaimable space
select
file_type
,percent_space_used
,percent_space_reclaimable
from
v$recovery_area_usage;

26
mhouri/getLOBname.sql Normal file
View File

@@ -0,0 +1,26 @@
--******************************************************************
-- |Name    : getLOBname                                           |
-- |Date    : 08 2022                                              |
-- |Author  : Mohamed Houri                                        |
-- |Purpose : find the table that owns a given LOB segment         |
--******************************************************************
col owner for a30
col table_name for a30
col segment_type for a30
col segment_name for a30
SELECT
a.owner
,b.table_name
,a.segment_type
,a.segment_name
FROM
dba_segments a
,dba_lobs b
WHERE
a.owner = b.owner
AND a.segment_name = b.segment_name
AND a.segment_name ='&lob_seg_name'
AND a.segment_type like '%LOB%';
-- fix: removed the stray lone ';' that followed the statement terminator;
-- in SQL*Plus a line containing only ';' re-executes the buffer, so the
-- query silently ran twice.

19
mhouri/getPartCol.sql Normal file
View File

@@ -0,0 +1,19 @@
-- Partition-level column statistics (histogram type, last analyzed, notes)
-- for &owner..&table_name; uncomment the last predicate to restrict to one
-- column, and the select items to also show num_distinct/density.
col partition_name format a20
col num_distinct format a20
col last_anal format a20
col column_name format a12
col user_stats format a12
select
partition_name
,column_name
--,num_distinct
--,density
,to_char(last_analyzed,'dd/mm/yyyy hh24:mi:ss') last_anal
,histogram
,notes
from
all_part_col_statistics
where owner = upper('&owner')
and table_name = upper('&table_name')
-- and column_name = upper('&column_name')
;

42
mhouri/getPartTab.sql Normal file
View File

@@ -0,0 +1,42 @@
-- Partition-level table statistics (sample size, stats provenance, last
-- analyzed, row counts) for &owner..&table_name, with a report-level total
-- of num_rows (SQL*Plus COMPUTE/BREAK).
col table_name format a20
col table_owner format a20
col pname format a20
col ppos format 99999
col last_anal format a20
col global_stats format a12
col user_stats format a12
compute sum label 'Total num_rows' of num_rows on report
break on report
set verify off
select
table_owner
,table_name
,partition_name pname
-- ,partition_position ppos
,sample_size
,global_stats
,user_stats
-- ,tablespace_name
,to_char(last_analyzed, 'dd/mm/yyyy hh24:mi:ss') last_anal
,num_rows
from
(
select
table_owner
,table_name
,partition_name
,partition_position
,subpartition_count
,num_rows
,sample_size
,global_stats
,user_stats
,last_analyzed
,tablespace_name
from
all_tab_partitions
where
table_owner = upper('&owner')
and
table_name = upper('&table_name')
);

34
mhouri/getPartTab1.sql Normal file
View File

@@ -0,0 +1,34 @@
-- Partition-level row counts for a table, with a report total.
-- Generalized: owner/table were hard-coded to c##mhouri / t_acs_part;
-- now prompted for, consistent with the sibling getPartTab.sql script.
col table_name format a20
col pname format a10
col ppos format 99999
col last_anal format a20
compute sum label 'Total num_rows' of num_rows on report
break on report
set verify off
select
table_name
,partition_name pname
,partition_position ppos
-- ,subpartition_count
,num_rows
,tablespace_name
,to_char(last_analyzed, 'dd/mm/yyyy hh24:mi:ss') last_anal
from
(
select
table_name
,partition_name
,partition_position
,subpartition_count
,num_rows
,last_analyzed
,tablespace_name
from
all_tab_partitions
where
table_owner = upper('&owner')
and
table_name = upper('&table_name')
);

43
mhouri/getPartTab2.sql Normal file
View File

@@ -0,0 +1,43 @@
-- Partition statistics plus each partition's decoded HIGH_VALUE: the LONG
-- column of dba_tab_partitions is extracted through dbms_xmlgen and the
-- embedded TO_DATE literal parsed back into a real DATE.
col table_name format a30
col pname format a30
col ppos format 99999
-- fix: the alias is last_anal; the col command said las_anal and never matched
col last_anal format a20
compute sum label 'Total num_rows' of num_rows on report
break on report
set verify off
select
table_name
,partition_name pname
,partition_position ppos
,subpartition_count
,num_rows
,tablespace_name
-- fix: &owner / &table_name were concatenated unquoted into the dynamic
-- string ( ...table_owner='''||&owner||'''... ), so the substituted value
-- was parsed as an identifier and raised ORA-00904; quote and upper() them
,to_date((regexp_replace( extract(dbms_xmlgen.getxmltype(
'select high_value from dba_tab_partitions
where table_owner='''||upper('&owner')||'''
and table_name='''||upper('&table_name')||'''
and partition_name='''||partition_name||''''
),'/ROWSET/ROW/HIGH_VALUE/text()').getstringval()
,'[^;]*apos; *([^;]*) *[^;]apos;.*','\1'))
,'yyyy-mm-dd hh24:mi:ss') high_value
,to_char(last_analyzed, 'dd/mm/yyyy hh24:mi:ss') last_anal
from
(
select
table_name
,partition_name
,partition_position
,subpartition_count
,num_rows
,last_analyzed
,tablespace_name
from
all_tab_partitions
where
table_owner = upper('&owner')
and
table_name = upper('&table_name')
);

43
mhouri/getPrefs.sql Normal file
View File

@@ -0,0 +1,43 @@
--*****************************************************************************
-- Name   : get_prefs
-- Date   : October 2018
-- Author : Mohamed Houri
-- Purpose: gets the value of dbms_stats preference at global or table level
--
--
-- Input arguments:
--   pname   - preference name
--             The default value for following preferences can be retrieved.
--               CASCADE
--               DEGREE
--               ESTIMATE_PERCENT
--               METHOD_OPT
--               NO_INVALIDATE
--               GRANULARITY
--               PUBLISH
--               INCREMENTAL
--               INCREMENTAL_LEVEL
--               INCREMENTAL_STALENESS
--               GLOBAL_TEMP_TABLE_STATS
--               STALE_PERCENT
--               AUTOSTATS_TARGET
--               CONCURRENT
--               TABLE_CACHED_BLOCKS
--               OPTIONS
--               STAT_CATEGORY
--               PREFERENCE_OVERRIDES_PARAMETER
--               APPROXIMATE_NDV_ALGORITHM
--               AUTO_STAT_EXTENSIONS
--               WAIT_TIME_TO_UPDATE_STATS
--
--
--   ownname - owner name
--   tabname - table name
--
--
-- Exceptions:
--   ORA-20001: Invalid input values
--
--**************************************************************************
-- Usage: @getPrefs (prompts for &preference and &tablename). When no value
-- is set at the table level, DBMS_STATS returns the global preference.
select dbms_stats.get_prefs('&preference', tabname => '&tablename') prefs from dual;

View File

@@ -0,0 +1,63 @@
--|-------------------------------------------------------------------|
--|Author : Mhouri                                                    |
--|Date   : March 2023                                                |
--|Scope  : find all SQL statements calling a scalar subquery function|
--|         -- from memory (gv$sql)                                   |
--|         -- from AWR (dba_hist_sqlstat/sqltext)                    |
--|         UNION ALL is used instead of UNION because of distinct    |
--|         on CLOB (sql_text)                                        |
--| Update : 20-03-2023                                               |
--|         worked around the distinct on CLOB by using               |
--|         dbms_lob.substr                                           |
--|-------------------------------------------------------------------|
select
gv.sql_id
,gv.force_matching_signature as force_match_sign
,dbms_lob.substr(gv.sql_fulltext,32767)
from
gv$sql gv
where
gv.plsql_exec_time > 0
-- Exclude non applicative schemas
and gv.parsing_schema_name not in
(select
db.username
from
dba_users db
where
db.oracle_maintained ='Y'
)
-- Exclude INSERT/UPDATE/DELETE and PL/SQL blocks
-- fix: command_type is a NUMBER; compare against numbers instead of
-- relying on implicit string-to-number conversion
and gv.command_type not in (2,6,7,47)
--exclude automatic tasks (stats, space, tuning)
-- fix: module can be NULL; a bare != silently dropped those rows
and (gv.module is null or gv.module != 'DBMS_SCHEDULER')
UNION ALL
select
st.sql_id
,st.force_matching_signature as force_match_sign
,dbms_lob.substr(qt.sql_text,32767)
from
dba_hist_sqlstat st
,dba_hist_sqltext qt
where
st.sql_id = qt.sql_id
and
st.plsexec_time_delta > 0
-- Exclude non applicative schemas
and st.parsing_schema_name not in
(select
db.username
from
dba_users db
where
db.oracle_maintained ='Y'
)
-- Exclude CREATE/INSERT/UPDATE/DELETE and PL/SQL blocks
and qt.command_type not in (1,2,6,7,47)
--exclude automatic tasks (stats, space, tuning)
and (st.module is null or st.module != 'DBMS_SCHEDULER')
-- fix: name the sort column instead of the positional "order by 2"
order by force_match_sign;

69
mhouri/getUndoHist.sql Normal file
View File

@@ -0,0 +1,69 @@
----- ./MohamedUndoHist.sql ------------------------
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|Author : Mohamed Houri                                              |
|Date   : 02/03/2020                                                 |
|Scope  : per-snapshot undo usage history from dba_hist_undostat:    |
|       : longest query, transaction count, block steal/reuse        |
|       : counters, tuned undo retention and expired/unexpired blocks|
|       : check historical undo, particularly ORA-01555              |
|       : input dates are entered as substitution parameters         |
|++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
col snap_begin format a25
col maxquerysqlid format a15
col maxquerylen format 9999999
col txncount format 9999999
col ora_01555 format 99
col undoblks format 9999999
col undoConsump format 9999999
col tuned_undoretention format 9999999
col activeblks format 9999999
col unexpiredblks format 9999999
col expiredblks format 9999999
compute sum label 'Total Unexpired' of unexpiredblks on report
break on report
SELECT
snap_begin
,maxquerysqlid
,maxquerylen
,txncount
,unxpstealcnt
,unxpblkrelcnt
,unxpblkreucnt
,expstealcnt
,expblkrelcnt
,expblkreucnt
,nospaceerrcnt
,ssolderrcnt ora_01555
,round(undoblks * 8 / 1024) undo_mb
,tuned_undoretention
,activeblks
,unexpiredblks
,expiredblks
FROM
(SELECT
sn.begin_interval_time snap_begin
,sn.instance_number inst
,st.maxquerylen
,st.maxquerysqlid
,st.unxpstealcnt
,st.unxpblkrelcnt
,st.unxpblkreucnt
,st.expstealcnt
,st.expblkrelcnt
,st.expblkreucnt
,st.ssolderrcnt
,st.nospaceerrcnt
,st.txncount
,st.undoblks
,st.tuned_undoretention
,st.activeblks
,st.unexpiredblks
,st.expiredblks
FROM
dba_hist_undostat st,
dba_hist_snapshot sn
WHERE st.snap_id = sn.snap_id
AND begin_interval_time between to_date('&from_date','ddmmyyyy hh24:mi:ss')
and to_date('&to_date','ddmmyyyy hh24:mi:ss')
)
ORDER by 1 asc, 3 desc;

6
mhouri/getarchiv.sql Normal file
View File

@@ -0,0 +1,6 @@
-- Check whether the database runs in ARCHIVELOG mode, from both SQL*Plus
-- (archive log list) and v$database, and show recovery-related parameters.
archive log list
SELECT LOG_MODE from v$database;
show parameter recovery

29
mhouri/getredo.sql Normal file
View File

@@ -0,0 +1,29 @@
--|--------------------------------------------------------------------------------------|
--|MHouri                                                                                 |
--|thread#      : redo thread (instance) number the log group belongs to                  |
--|members      : shows whether the redo log is multiplexed (>1) or not (=1)              |
--|redo_size_mb : size of each redo log file                                              |
--|Total size   : space actually available to handle transactions; multiplexed            |
--|             : copies are not counted (only thread# = 1 rows contribute).              |
--|---------------------------------------------------------------------------------------|
column REDOLOG_FILE_NAME format a50
compute sum label 'Total size' of size_MB on report
break on report
select
a.group#,
a.thread#,
a.members,
a.sequence#,
a.archived,
a.status,
b.member as redolog_file_name,
trunc(a.bytes/power(1024,2),3) as redo_size_mb,
case when a.thread# = 1 then
trunc(a.bytes/power(1024,2),3)
else null end as size_MB
from gv$log a
join gv$logfile b on a.group#=b.group#
order by a.group# ;

36
mhouri/gv$getBreak.sql Normal file
View File

@@ -0,0 +1,36 @@
-- How many 'SQL*Net break/reset to client' waits has MY OWN session
-- accumulated? Grabs the current sid from v$mystat, reads its wait totals
-- from gv$session_event, and joins back to gv$session/gv$process for the
-- top-level call name, users and the process trace file.
with got_my_sid as
(select
sid
,inst_id
,event
,total_waits
from
gv$session_event
where
event = 'SQL*Net break/reset to client'
and sid = (select sid from v$mystat where rownum = 1)
)
select
a.inst_id
,a.sid
,a.event
,a.total_waits
-- ,s.schema#
-- ,s.schemaname
-- ,s.top_level_call#
,(select l.top_level_call_name from v$toplevelcall l
where l.top_level_call# = s.top_level_call#
) top_level_call
,s.osuser
,s.username sess_user
,p.username proc_user
,p.tracefile
from
gv$process p
,gv$session s
,got_my_sid a
where
p.addr = s.paddr
and a.sid = s.sid
and a.inst_id = s.inst_id;

23
mhouri/gvsql.sql Normal file
View File

@@ -0,0 +1,23 @@
-- Cursor-cache overview for a sql_id: one row per shareable child cursor
-- with Adaptive Cursor Sharing flags (bind sensitive/aware), executions,
-- end-of-fetch count and invalidations.
col object_status format a10
col end_of_fetch_count format 999
SELECT
p.sql_id
,p.child_number
--,p.plan_hash_value
,p.is_bind_sensitive bsens
,p.is_bind_aware baware
--,p.first_load_time
--,p.last_load_time
,p.executions
,p.end_of_fetch_count end_fetch
,p.invalidations
,p.object_status
FROM
gv$sql p
where
p.sql_id = '&sql_id'
and
p.is_shareable ='Y';

80
mhouri/h_hist1.sql Normal file
View File

@@ -0,0 +1,80 @@
/* ----------------------------------------------------------------------------------|
|Author : Mohamed Houri                                                               |
|Date   : 03/07/2020                                                                  |
|Scope  : This script gives historical column histogram values                        |
|         -- I am using sys.WRI$_OPTSTAT_HISTHEAD_HISTORY for this purpose            |
|         -- I am only able to say whether, previously, there was HISTOGRAM or not    |
|            (a histogram is inferred when the stored density differs from            |
|             1/num_distinct)                                                         |
|         -- I can't show the historical type of Histogram                            |
|                                                                                     |
|Usage  : SQL> @h_hist1                                                               |
|         Enter value for table_name: t1                                              |
|         Enter value for owner: test                                                 |
|         Enter value for col_name: n2                                                |
-------------------------------------------------------------------------------------|*/
col object_name format a20
col column_name format a12
col last_analyzed format a20
col prev_last_analyzed format a20
col histogram format a16
col prev_histogram format a16
-- sq: resolve the (non-partitioned) object_id of the target table
WITH sq AS
(
SELECT
object_id
,object_name
,subobject_name
FROM
dba_objects
WHERE
object_name = upper ('&&table_name')
AND owner = upper('&&owner')
AND subobject_name IS NULL
)
SELECT
object_name
,column_name
,lead(prev_histogram,1,histogram) over (order by last_analyzed) histogram
,last_analyzed
,prev_histogram
,prev_last_analyzed
FROM
(
SELECT
object_name
,column_name
,(select histogram from all_tab_col_statistics where owner = upper('&&owner')
and table_name = upper('&&table_name') and column_name = upper('&&col_name')) histogram
,last_analyzed
,stat_time prev_last_analyzed
,row_number() over (order by last_analyzed) rn
,case when round(derivedDensity,9)= round(density,9) then 'NONE' else 'HISTOGRAM' end prev_histogram
FROM
(
SELECT
object_name
,column_name
,to_char(savtime ,'dd/mm/yyyy hh24:mi:ss') last_analyzed
,to_char(timestamp# ,'dd/mm/yyyy hh24:mi:ss') stat_time
,density
,1/distcnt derivedDensity
,row_number() over (order by savtime) rn
,lag(case when round(1/distcnt,9) = round(density,9) then 'NONE' else 'HISTOGRAM' end) over(order by savtime) hist_histogram
FROM
sys.WRI$_OPTSTAT_HISTHEAD_HISTORY
INNER JOIN sq ON object_id = obj#
INNER JOIN (SELECT
column_id
,column_name
FROM
dba_tab_columns
WHERE
column_name = upper('&&col_name')
AND table_name = upper('&&table_name')
AND owner = upper('&&owner')
) ON intcol# = column_id
)
WHERE
rn >= 1 --exclude/include the very first dbms_stat
)
ORDER BY
last_analyzed;

24
mhouri/isInParse.sql Normal file
View File

@@ -0,0 +1,24 @@
-- For each execution (sql_exec_id) of a sql_id, count ASH samples broken
-- down by phase flags (executing / parsing / hard parsing). Helps tell
-- parse time apart from execution time.
column in_exec format a15
column in_hard_parse format a15
column in_parse format a15
column cnt format 9999
compute sum label 'Total Elapsed' of cnt on report
break on report
select
sql_exec_id
,in_sql_execution in_exec
,in_parse
,in_hard_parse
,count(1) cnt
from
gv$active_session_history
where
sql_id = '&sql_id'
group by
sql_exec_id
,in_sql_execution
,in_parse
,in_hard_parse
order by 5 desc
;

View File

@@ -0,0 +1,35 @@
--|-------------------------------------------------------------------|
--|Author : Mhouri                                                    |
--|Date   : September 2022                                            |
--|Scope  : this script helps identifying whether the current sql_id  |
--|         is really the one that is responsible for the time stored |
--|         into ASH. In other words, we can state whether the input  |
--|         sql_id has triggered a recursive sql id or not.           |
--|         Samples are broken down by event and by the object being  |
--|         touched (current_obj#).                                   |
--|Note   : the INNER JOIN to dba_objects drops samples whose         |
--|         current_obj# is not a real object (0 = undo, -1 = CPU).   |
--|-------------------------------------------------------------------|
column is_sqlid_current format a10
column event format a70
column current_obj# format a30
column cnt format 9999
compute sum label 'Total Elapsed' of cnt on report
break on report
select
h.is_sqlid_current
,h.event
,ob.object_name
,count(1) cnt
from
gv$active_session_history h
join dba_objects ob
on ob.object_id = h.current_obj#
where
sample_time between to_date('&from_date', 'mm/dd/yyyy hh24:mi:ss')
and to_date('&to_date', 'mm/dd/yyyy hh24:mi:ss')
and
sql_id = '&sql_id'
group by
h.is_sqlid_current
,h.event
,ob.object_name
order by 4 desc
;

29
mhouri/isSQLCurrent.sql Normal file
View File

@@ -0,0 +1,29 @@
--|-------------------------------------------------------------------|
--|Author : Mhouri                                                    |
--|Date   : September 2022                                            |
--|Scope  : this script helps identifying whether the current sql_id  |
--|         is really the one that is responsible for the time stored |
--|         into ASH. In other words, we can state whether the input  |
--|         sql_id has triggered a recursive sql id or not            |
--|         (IS_SQLID_CURRENT = 'N' means the sample was attributed   |
--|         to this sql_id while another cursor was actually current).|
--|-------------------------------------------------------------------|
column sql_current format a15
column cnt format 9999
compute sum label 'Total Elapsed' of cnt on report
break on report
select
sql_exec_id
,is_sqlid_current sql_current
,count(1) cnt
from
gv$active_session_history
where
sql_id = '&sql_id'
and
sample_time between to_date('&datefrom', 'mm/dd/yyyy hh24:mi:ss')
and to_date('&dateto', 'mm/dd/yyyy hh24:mi:ss')
group by
sql_exec_id
,is_sqlid_current
order by 3 desc
;

53
mhouri/isSQLusingSPM.sql Normal file
View File

@@ -0,0 +1,53 @@
--https://orastory.wordpress.com/2014/02/05/awr-was-a-baselined-plan-used/
-- For each AWR-captured plan of &sql_id: does an accepted SPM baseline
-- exist for the statement's signature, and was THIS plan the baselined
-- one? Matching is done through plan_hash_2 (extracted from OTHER_XML)
-- against sys.sqlobj$.plan_id, since PLAN_HASH_VALUE is not what SPM stores.
with subq_mysql as
(select sql_id
, (select dbms_sqltune.sqltext_to_signature(ht.sql_text)
from dual) sig
from dba_hist_sqltext ht
where sql_id = '&sql_id')
, subq_baselines as
(select b.signature
, b.plan_name
, b.accepted
, b.created
, o.plan_id
from subq_mysql ms
, dba_sql_plan_baselines b
, sys.sqlobj$ o
where b.signature = ms.sig
and o.signature = b.signature
and o.name = b.plan_name)
, subq_awr_plans as
(select sn.snap_id
, to_char(sn.end_interval_time,'DD-MON-YYYY HH24:MI') dt
, hs.sql_id
, hs.plan_hash_value
, t.phv2
, ms.sig
from subq_mysql ms
, dba_hist_sqlstat hs
, dba_hist_snapshot sn
, dba_hist_sql_plan hp
, xmltable('for $i in /other_xml/info
where $i/@type eq "plan_hash_2"
return $i'
passing xmltype(hp.other_xml)
columns phv2 number path '/') t
where hs.sql_id = ms.sql_id
and sn.snap_id = hs.snap_id
and sn.instance_number = hs.instance_number
and hp.sql_id = hs.sql_id
and hp.plan_hash_value = hs.plan_hash_value
and hp.other_xml is not null)
select awr.*
, nvl((select max('Y')
from subq_baselines b
where b.signature = awr.sig
and b.accepted = 'YES'),'N') does_baseline_exist
, nvl2(b.plan_id,'Y','N') is_baselined_plan
, to_char(b.created,'DD-MON-YYYY HH24:MI') when_baseline_created
from subq_awr_plans awr
, subq_baselines b
where b.signature (+) = awr.sig
and b.plan_id (+) = awr.phv2
order by awr.snap_id;

View File

@@ -0,0 +1,25 @@
-- Can a parameter be changed with ALTER SYSTEM without bouncing the instance?
--   IMMEDIATE : no need to restart the instance
--   FALSE     : you need to restart the instance
--   DEFERRED  : new sessions see the new value; no restart needed
-- The x$ branch of the UNION also covers hidden (underscore) parameters
-- that are absent from gv$parameter.
col name for a35
select
name
,issys_modifiable
--,ispdb_modifiable
from
gv$parameter
where
name = '&parameter_name'
union
select
n.ksppinm as name
-- fix: ksppstdf is the ISDEFAULT flag, not ISSYS_MODIFIABLE; derive the
-- real value from ksppiflg exactly as the v$parameter view definition does
,decode(bitand(n.ksppiflg/65536,3),1,'IMMEDIATE',2,'DEFERRED',3,'IMMEDIATE','FALSE') as issys_modifiable
from
sys.x$ksppi n
,sys.x$ksppcv c
where n.indx=c.indx
and lower(n.ksppinm) = lower('&parameter_name');

104
mhouri/loadSPMfromAWR.sql Normal file
View File

@@ -0,0 +1,104 @@
/* --|-------------------------------------------------------------------------|
   --| Author      : Mohamed Houri                                            --|
   --| Date        : 08/06/2022                                               --|
   --| inspired by : Carlos Sierra create_spb_from_awr.sq script              --|
   --| Scope       : Create a SPM baseline from a historical execution plan   --|
   --| Usage       : @loadSPMfromAWR                                          --|
   --| Remarks     : the begin and end snap must be different                 --|
   --|             : the plan hash value must be present in the end snap      --|
   --|             : for example if you are in the following case             --|
   --|                                                                        --|
   --|  SQL> @loadSPMAWR                                                      --|
   --|  Enter sql_id: b64jvr5722ujx                                           --|
   --|                                                                        --|
   --|  PLAN_HASH_VALUE AVG_ET_SECS EXECUTIONS_TOTAL                          --|
   --|  --------------- ----------- ----------------                         --|
   --|       1362763525   17.825681                1                          --|
   --|       1518369540                                                       --|
   --|                                                                        --|
   --|  Enter Plan Hash Value: 1362763525                                     --|
   --|                                                                        --|
   --|  BEGIN_SNAP_ID END_SNAP_ID                                             --|
   --|  ------------- -----------                                             --|
   --|            692         692                                             --|
   --|                                                                        --|
   --|  Enter begin snap: 691   <-- note: enter one snap EARLIER so that      --|
   --|  Enter end snap: 692         begin and end differ                      --|
   --|                                                                        --|
   --|  PL/SQL procedure successfully completed.                              --|
   --|                                                                        --|
   --|          RS                                                            --|
   --|  ----------                                                            --|
   --|           1                                                            --|
   --|-----------------------------------------------------------------------| */
acc sql_id prompt 'Enter sql_id: ';
-- step 1: list every historical plan of the sql_id with its average elapsed
-- time so the user can pick the good plan_hash_value
with p as
(SELECT
plan_hash_value
FROM
dba_hist_sql_plan
WHERE
sql_id = trim('&&sql_id.')
AND other_xml IS NOT NULL )
,a as
(SELECT
plan_hash_value
,SUM(elapsed_time_total)/SUM(executions_total) avg_et_secs
,MAX(executions_total) executions_total
FROM
dba_hist_sqlstat
WHERE
sql_id = TRIM('&&sql_id.')
AND executions_total > 0
GROUP BY
plan_hash_value
)
SELECT
p.plan_hash_value
,ROUND(a.avg_et_secs/1e6, 6) avg_et_secs
,a.executions_total
FROM
p,a
WHERE
p.plan_hash_value = a.plan_hash_value(+)
ORDER BY
avg_et_secs NULLS LAST;
acc plan_hash_value prompt 'Enter Plan Hash Value: ';
COL dbid new_v dbid NOPRI;
SELECT dbid FROM v$database;
-- step 2: show the snapshot range in which the chosen plan was captured
col begin_snap_id new_v begin_snap_id;
col end_snap_id new_v end_snap_id;
SELECT
MIN(p.snap_id) begin_snap_id
,MAX(p.snap_id) end_snap_id
FROM
dba_hist_sqlstat p
,dba_hist_snapshot s
WHERE
p.dbid = &&dbid
AND p.sql_id = '&&sql_id.'
AND p.plan_hash_value = to_number('&&plan_hash_value.')
AND s.snap_id = p.snap_id
AND s.dbid = p.dbid
AND s.instance_number = p.instance_number;
acc begin_snap_id prompt 'Enter begin snap: ';
acc end_snap_id prompt 'Enter end snap: ';
-- step 3: load the chosen plan into a SQL plan baseline; rs = plans loaded
var rs number;
begin
:rs := dbms_spm.load_plans_from_awr(begin_snap => &&begin_snap_id.
,end_snap => &&end_snap_id.
,basic_filter => q'# sql_id = TRIM('&&sql_id.') and plan_hash_value = TO_NUMBER('&&plan_hash_value.') #');
end;
/
print rs;

View File

@@ -0,0 +1,17 @@
--|-------------------------------------------------------------|
--| Author : Mohamed Houri --|
--| Date : 08/06/2022 --|
--| Scope : Create a SPM baseline from a cursor cache --|
--| Usage : @loadSPMfromCache --|
--| Remarks : --|
--|-------------------------------------------------------------|
set serveroutput on
declare
spm_op pls_integer;
begin
spm_op := dbms_spm.load_plans_from_cursor_cache (sql_id => '&sqlid'
,plan_hash_value => to_number(trim('&plan_hash_value'))
);
dbms_output.put_line('Plans Loaded into SPB :'||spm_op);
end;
/

18
mhouri/nonsharedMho.sql Normal file
View File

@@ -0,0 +1,18 @@
-- Decode gv$sql_shared_cursor.REASON (an XML document) for a sql_id:
-- one row per <ChildNode>, showing the child number and the reason why
-- a new child cursor had to be created instead of sharing an existing one.
select
sh.sql_id,
xt.c1,
xt.r1 raison,
xt.d1
from
gv$sql_shared_cursor sh
inner join xmltable (
'/ChildNode'
passing xmlparse(content sh.reason)
columns
c1 number path 'ChildNumber',
r1 varchar2(40) path 'reason',
d1 varchar2(40) path 'details'
)
xt on ( 1 = 1 )
where
sh.sql_id = '&sql_id';

19
mhouri/phv2.sql Normal file
View File

@@ -0,0 +1,19 @@
/* -----------------------------------------------------------------------------|
|Author : Mohamed Houri                                                          |
|Date   : 03/07/2017                                                             |
|Scope  : gives plan_hash_value and phv2 that includes the predicate part.       |
|         phv2 ("plan_hash_2", extracted from OTHER_XML) is the hash that        |
|         SPM/SQL-patch infrastructure stores as plan_id.                        |
--------------------------------------------------------------------------------|*/
SELECT
p.sql_id
,p.plan_hash_value
,p.child_number
,t.phv2
FROM v$sql_plan p
,xmltable('for $i in /other_xml/info
where $i/@type eq "plan_hash_2"
return $i'
passing xmltype(p.other_xml)
columns phv2 number path '/') t
WHERE p.sql_id = '&1'
and p.other_xml is not null;

86
mhouri/sqlHist.sql Normal file
View File

@@ -0,0 +1,86 @@
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  Author : Mohamed Houri
  Date   : 03/08/2016
  Scope  : This is an updated version of an existing script (over the web)
           in which I have taken into account the following points:
           -- Superfluous executions and not used plan_hash_value are excluded.
              Superfluous plan_hash_value are inserted into dba_hist_sqlstat
              because they were present in gv$sql at the AWR capture time.
              This generally happens when a SQL_ID has several child cursors
              in gv$sql. All these child cursors will be captured regardless
              of their activity (used or not used). These superfluous executions
              are excluded using the following where clause:
                   WHERE avg_lio != 0
              But a "lock table" for example doesn't consume any logical I/O. This
              is why I added the following extra where clause:
                   OR (avg_lio =0 AND avg_etime > 0)
           -- When a query is run in PARALLEL the avg_etime represents the time
              spent by all parallel servers concurrently. So if avg_px is not null then
              avg_etime represents the cumulated time of all PX servers. To have
              the approximate wall clock time of the query I have divided the
              avg_time by the avg_px to obtain avg_px_time.
  Warning : avg_px_time does not include the Query Coordinator time.
            I still have not figured out how to get the QC time from a historical
            table. I have a script QCelaps.sql which gives the QC time but it
            is based on gv$sql_monitor. This is obviously not reliable when
            dealing with historical executions.
  Investigation : maybe I have to investigate the new 12c RTSM
            historical tables : dba_hist_reports
                                dba_hist_reports_details
  Update  : 28-09-2016 : add end_of_fetch column
            if end_of_fetch = 0 and exec = 1 then
               this means that the query did not finish during the snapshot
            end if
            When you see avg_rows = 0 this doesn't necessarily mean that
            the query has not finished during the snapshot
 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
col snap_begin format a25
col sql_profile format a20
col execs format 9999999
SELECT
snap_begin
,snap_id
,plan_hash_value
,sql_profile
,execs
,end_of_fetch
,avg_etime
,avg_px
,trunc(avg_etime/decode(avg_px,0,1,avg_px) ,2) avg_px_time
,avg_pio
,avg_lio
,avg_rows
FROM
(SELECT
sn.begin_interval_time snap_begin
,sn.snap_id
,plan_hash_value
,st.sql_profile
,executions_delta execs
,end_of_fetch_count_delta end_of_fetch
,trunc(elapsed_time_delta/1e6/decode(executions_delta, 0, 1, executions_delta)) avg_etime
,round(disk_reads_delta/decode(executions_delta,0,1, executions_delta),1) avg_pio
,round(buffer_gets_delta/decode(executions_delta,0,1, executions_delta), 1) avg_lio
,round(px_servers_execs_delta/decode(executions_delta,0,1, executions_delta), 1) avg_px
,round(rows_processed_delta/decode(executions_delta,0, 1, executions_delta), 1) avg_rows
FROM
dba_hist_sqlstat st,
dba_hist_snapshot sn
WHERE st.snap_id = sn.snap_id
AND st.instance_number = sn.instance_number
AND sql_id = '&sql_id'
AND begin_interval_time > to_date('&from_date','ddmmyyyy')
)
WHERE avg_lio != 0
OR (avg_lio =0 AND avg_etime > 0)
ORDER by 1 asc;

22
mhouri/sqlmonHist.sql Normal file
View File

@@ -0,0 +1,22 @@
-- Locate historical (AWR) SQL Monitor reports for a sql_id in
-- dba_hist_reports, then fetch one report's text by its report_id.
SELECT report_id, key1 sql_id, key2 sql_exec_id, key3 sql_exec_start
FROM dba_hist_reports
WHERE component_name = 'sqlmonitor'
-- fix: was "key1 = ''" -- in Oracle '' is NULL, so the predicate could
-- never be true and the query always returned zero rows
and key1 = '&sql_id';
-- fetch the text of one report (generalized from the hard-coded RID 1022)
SELECT DBMS_AUTO_REPORT.REPORT_REPOSITORY_DETAIL(RID => &report_id, TYPE => 'text')
FROM dual;
-- another way to do this: pull the identifiers out of the XML summary
SELECT
report_id,
EXTRACTVALUE(XMLType(report_summary),'/report_repository_summary/sql/@sql_id') sql_id,
EXTRACTVALUE(XMLType(report_summary),'/report_repository_summary/sql/@sql_exec_id') sql_exec_id,
EXTRACTVALUE(XMLType(report_summary),'/report_repository_summary/sql/@sql_exec_start') sql_exec_start
FROM
dba_hist_reports
WHERE
component_name = 'sqlmonitor'
and EXTRACTVALUE(XMLType(report_summary),'/report_repository_summary/sql/@sql_id')='&sql_id'
;

13
mhouri/sqlstats.sql Normal file
View File

@@ -0,0 +1,13 @@
-- Per-child-cursor averages (gets, physical reads, elapsed seconds) from
-- gv$sql for a given sql_id.
-- fix: 'ol execs ...' was a typo for the SQL*Plus 'col' command
col execs format 99999999
select
child_number child
--, sql_profile
, plan_hash_value
, round(buffer_gets/decode(nvl(executions,0),0,1,executions)) avg_gets
, round(disk_reads/decode(nvl(executions,0),0,1,executions)) avg_pios
, (elapsed_time/1000000)/decode(nvl(executions,0),0,1,executions) avg_etime
, executions execs
from
gv$sql
where
sql_id = '&sql_id';

31
mhouri/xpgv$.sql Normal file
View File

@@ -0,0 +1,31 @@
-- Display ADVANCED/ALLSTATS LAST execution plans for every loaded child
-- cursor of &&sql_id across all RAC instances, using DBMS_XPLAN.DISPLAY's
-- filter-predicate argument against gv$sql_plan_statistics_all.
-- First query prefixes each plan line with its instance and child number.
SELECT
RPAD('Inst: '||v.inst_id, 9)||' '||RPAD('Child: '||v.child_number, 11) inst_child
,t.plan_table_output
FROM
gv$sql v,
TABLE(DBMS_XPLAN.DISPLAY('gv$sql_plan_statistics_all'
, NULL
, 'ADVANCED ALLSTATS LAST'
, 'inst_id = '||v.inst_id||'
AND sql_id = '''||v.sql_id||''' AND child_number = '||v.child_number
)
) t
WHERE
v.sql_id = '&&sql_id.'
AND v.loaded_versions > 0;
-- Same plans without the instance/child prefix column.
SELECT
t.plan_table_output
FROM
gv$sql v,
TABLE(DBMS_XPLAN.DISPLAY('gv$sql_plan_statistics_all'
, NULL
, 'ADVANCED ALLSTATS LAST'
, 'inst_id = '||v.inst_id||'
AND sql_id = '''||v.sql_id||''' AND child_number = '||v.child_number
)
) t
WHERE
v.sql_id = '&&sql_id.'
AND v.loaded_versions > 0;

1
mhouri/xpsimp.sql Normal file
View File

@@ -0,0 +1 @@
-- Show the execution plan of the last statement executed in this session.
select * from table(dbms_xplan.display_cursor);