2026-03-12 21:01:38

This commit is contained in:
2026-03-12 22:01:38 +01:00
parent 3bd1db26cc
commit 26296b6d6a
336 changed files with 27507 additions and 0 deletions

View File

@@ -0,0 +1,86 @@
-- Tracking column histogram modifications by M.Houri
-- https://hourim.wordpress.com/2020/08/06/historical-column-histogram/
-- Demo setup: C_FREQ is heavily skewed (decode gives 2 on every 10th row,
-- 1 otherwise); C_HB copies BLOCKS (999 when null) -> many distinct values.
create table T1 tablespace TS1 as
select rownum id, decode(mod(rownum,10),0,2,1) c_freq, nvl(blocks,999) c_hb
from dba_tables ;
-- Introduce a rare third value so C_FREQ has a low-frequency outlier.
update T1 set c_freq=3 where rownum<=10;
commit;
create index idx_freq on T1(C_FREQ) tablespace TS1;
create index idx_hb on T1(C_HB) tablespace TS1;
-- Show the actual value distribution of C_FREQ.
select c_freq,count(*) from T1 group by c_freq order by 2 desc;
-- Baseline: gather stats WITHOUT histograms (size 1 = one bucket per column).
exec dbms_stats.gather_table_stats (user, 'T1', method_opt=>'for all columns size 1');
col column_name for a20
select column_name,num_distinct,density,num_nulls,num_buckets,sample_size,histogram
from user_tab_col_statistics
where table_name='T1' and column_name='C_FREQ';
-- Estimated vs actual rows for the rare value, without a histogram.
select /*+ GATHER_PLAN_STATISTICS */ * from T1 where C_FREQ=3;
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
select column_name,num_distinct,density,num_nulls,num_buckets,sample_size,histogram
from user_tab_col_statistics
where table_name='T1' and column_name='C_HB';
-- Same check for the popular C_HB value 999.
select /*+ GATHER_PLAN_STATISTICS */ * from T1 where C_HB=999;
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
---------------- FREQ
-- Let the optimizer decide (size AUTO) whether C_FREQ deserves a histogram.
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for columns C_FREQ size AUTO');
select column_name,num_distinct,density,num_nulls,num_buckets,sample_size,histogram
from user_tab_col_statistics
where table_name='T1' and column_name='C_FREQ';
-- Per-bucket endpoints: cumulative count plus per-value frequency via LAG.
select endpoint_value as column_value,
endpoint_number as cummulative_frequency,
endpoint_number - lag(endpoint_number,1,0) over (order by endpoint_number) as frequency
from user_tab_histograms
where table_name = 'T1' and column_name = 'C_FREQ';
-- Force a hard parse so the new stats are used.
alter system flush shared_pool;
select /*+ GATHER_PLAN_STATISTICS */ * from T1 where C_FREQ=3;
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
--------------- WEIGHT
-- Request the maximum classic bucket count (254) on the many-valued C_HB.
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for columns C_HB size 254');
select column_name,num_distinct,density,num_nulls,num_buckets,sample_size,histogram
from user_tab_col_statistics
where table_name='T1' and column_name='C_HB';
select endpoint_value as column_value,
endpoint_number as cummulative_frequency,
endpoint_number - lag(endpoint_number,1,0) over (order by endpoint_number) as frequency
from user_tab_histograms
where table_name = 'T1' and column_name = 'C_HB';
-- Rebuild T1 from DBA_EXTENTS for a larger row source.
-- NOTE(review): this CREATE fails while the first T1 still exists -- a DROP
-- was presumably run in the original session; the script looks truncated here.
create table T1 tablespace TS1 as
select rownum id, decode(mod(rownum,10),0,2,1) c_freq, nvl(blocks,999) c_hb
from dba_extents ;
update T1 set c_freq=3 where rownum<=10;
commit;

252
histograms/histogram_02.txt Normal file
View File

@@ -0,0 +1,252 @@
-- histogram_02: alternative T1 populations with different skew patterns;
-- each drop/create block is meant to be run stand-alone.
drop table T1 purge;
-- Variant 1: only two values -- 1 (9 of every 10 rows) and 10 (every 10th).
create table T1 tablespace TS1 as
select
rownum id,
decode(mod(rownum,10),0,10,1) col1
from ( select 1 just_a_column
from DUAL
connect by level <= 100000
)
/
---------
drop table T1 purge;
-- Variant 2: overlapping multiples of 3,5,7,11,13,17; nested DECODE keeps
-- the FIRST matching label, everything else becomes 'other'.
create table T1 tablespace TS1 as
select
rownum id,
decode(mod(rownum,3),0,'m3',
decode(mod(rownum,5),0,'m5',
decode(mod(rownum,7),0,'m7',
decode(mod(rownum,11),0,'m11',
decode(mod(rownum,13),0,'m13',
decode(mod(rownum,17),0,'m17',
'other')))))) col1
from ( select 1 just_a_column
from DUAL
connect by level <= 100000
)
/
------------
drop table T1 purge;
-- Variant 3: col1..col4 keep 10/400/4000/10000 distinct low values; all
-- remaining rows collapse onto the single popular value 99999.
create table T1 tablespace TS1 as
select
rownum id,
case when rownum<=10 then rownum else 99999 end col1,
case when rownum<=400 then rownum else 99999 end col2,
case when rownum<=4000 then rownum else 99999 end col3,
case when rownum<=10000 then rownum else 99999 end col4
from ( select 1 just_a_column
from DUAL
connect by level <= 100000
)
/
---------
drop table T1 purge;
-- Variant 4: mixed uniform regions with an out-of-pattern tail value.
create table T1 tablespace TS1 as
select
rownum id,
case when rownum>=1 and rownum<1000 then mod(rownum,10) else 99999 end col1,
case when rownum>=1 and rownum<99900 then mod(rownum,1000) else rownum end col2,
mod(rownum,300) col3
from ( select 1 just_a_column
from DUAL
connect by level <= 100000
)
/
---------
drop table T1 purge;
-- Variant 5: NDV right around the 254-bucket frequency-histogram limit
-- (254 / 255 / 256 distinct values).
create table T1 tablespace TS1 as
select
rownum id,
mod(rownum,254) col1,
mod(rownum,255) col2,
mod(rownum,256) col3
from ( select 1 just_a_column
from DUAL
connect by level <= 100000
)
/
-- SKEWONLY: build histograms only on columns with skewed data.
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size SKEWONLY');
select column_name,num_distinct,density,num_nulls,num_buckets,sample_size,histogram
from user_tab_col_statistics
where table_name='T1';
-- NOTE(review): COL4 exists only in variant 3 above -- this endpoint query
-- is meant to follow that variant, not variant 5.
select endpoint_value as column_value,
endpoint_number as cummulative_frequency,
endpoint_number - lag(endpoint_number,1,0) over (order by endpoint_number) as frequency
from user_tab_histograms
where table_name = 'T1' and column_name = 'COL4';
select col1,count(*) from T1 group by col1 order by 2 desc;
--------------------
-- TOP-FREQUENCY histogram demo (12c), after the article below.
https://www.red-gate.com/simple-talk/databases/oracle-databases/12c-histogram-top-frequency/
drop table T_TopFreq purge;
-- 200 rows; n2 is built from cascading CASE arms so a handful of values
-- dominate and a few values are rare.
create table T_TopFreq as
select
rownum n1
, case when mod(rownum, 100000) = 0 then 90
when mod(rownum, 10000) = 0 then 180
when mod(rownum, 1000) = 0 then 84
when mod(rownum, 100) = 0 then 125
when mod(rownum,50) = 2 then 7
when mod(rownum-1,80) = 2 then 22
when mod(rownum, 10) = 0 then 19
when mod(rownum-1,10) = 5 then 15
when mod(rownum-1,5) = 1 then 11
-- NOTE(review): (rownum -1/3) evaluates as rownum - 0.333..., so trunc()
-- is just rownum-1; the intent may have been trunc((rownum-1)/3) --
-- confirm against the source article before changing it.
when trunc((rownum -1/3)) < 5 then 25
when trunc((rownum -1/5)) < 20 then 33
else 42
end n2
from dual
connect by level <= 2e2
/
set serveroutput ON
-- Enable the DBMS_STATS trace preference (flag value 1+16) to see the
-- histogram-type decision in the gather output; see the article above.
exec dbms_stats.set_global_prefs ('TRACE', to_char (1+16));
-- Ask for only 8 buckets on n2: fewer buckets than distinct values.
exec dbms_stats.gather_table_stats (user,'T_TOPFREQ',method_opt=> 'for columns n2 size 8');
exec dbms_stats.set_global_prefs('TRACE', null);
-- Rows covered by the 8 most frequent values (the top-frequency criterion).
select
sum (cnt) TopNRows
from (select
n2
,count(*) cnt
from t_topfreq
group by n2
order by count(*) desc
)
where rownum <= 8;
-- Same computation written with a WITH clause.
with FREQ as
( select
n2
,count(*) cnt
from t_topfreq
group by n2
order by count(*) desc
)
select sum(cnt) from FREQ where rownum<=8;
-- Which histogram type did DBMS_STATS actually pick?
select column_name,num_distinct,density,num_nulls,num_buckets,sample_size,histogram
from user_tab_col_statistics
where table_name='T_TOPFREQ';
--------------------------------------------------------------
-- HYBRID histogram demo: ~303 distinct values (more than 254 buckets)
-- with three artificially popular values in the top of the ID range.
drop table T1 purge;
create table T1 tablespace TS1 as
select
rownum id,
mod(rownum,300) col1
from ( select 1 just_a_column
from DUAL
connect by level <= 100e3
)
/
-- Make 567/678/789 popular. NOTE(review): the BETWEEN ranges overlap at
-- id=75e3 and id=90e3, so the later UPDATE wins on those boundary rows.
update T1 set col1=567 where id between 70e3 and 75e3;
update T1 set col1=678 where id between 75e3 and 90e3;
update T1 set col1=789 where id between 90e3 and 100e3;
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size SKEWONLY');
-- histogram type
select column_name,num_distinct,density,num_nulls,num_buckets,sample_size,histogram
from user_tab_col_statistics
where table_name='T1';
-- how many rows are in the TOP-N values ?
with FREQ as
( select
col1
,count(*) cnt
from T1
group by col1
order by count(*) desc
)
select sum(cnt) from FREQ where rownum<=254
;
-- frequency by column value / bucket; ENDPOINT_REPEAT_COUNT is the 12c
-- column that marks how often the endpoint value repeats (hybrid histogram)
select endpoint_value as column_value,
endpoint_number as cummulative_frequency,
endpoint_number - lag(endpoint_number,1,0) over (order by endpoint_number) as frequency,
ENDPOINT_REPEAT_COUNT
from user_tab_histograms
where table_name = 'T1' and column_name = 'COL1';
--------------------------------------------------------------
--------------------------------------------------------------
-- 2000 distinct values with an explicit size 2048 request: more buckets
-- than the classic 254 limit (12c accepts up to 2048) -- observe which
-- histogram type DBMS_STATS actually produces.
drop table T1 purge;
create table T1 tablespace TS1 as
select
rownum id,
mod(rownum,2000) col1
from ( select 1 just_a_column
from DUAL
connect by level <= 1000e3
)
/
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size 2048');
-- histogram type
select column_name,num_distinct,density,num_nulls,num_buckets,sample_size,histogram
from user_tab_col_statistics
where table_name='T1';

120
histograms/histogram_03.txt Normal file
View File

@@ -0,0 +1,120 @@
-- histogram_03: end-to-end join-cardinality demo on the HR sample schema
-- inside a freshly created pluggable database.
create pluggable database NEREUS admin user PDB$OWNER identified by secret;
alter pluggable database NEREUS open;
alter pluggable database NEREUS save state;
alter session set container=NEREUS;
show pdbs
show con_name
grant sysdba to adm identified by secret;
-- OS shell line (not SQL): convenience alias for connecting to the PDB.
alias NEREUS='rlwrap sqlplus adm/secret@bakura/NEREUS as sysdba'
create tablespace USERS datafile size 32M autoextend ON next 32M;
alter database default tablespace USERS;
create user HR identified by secret
quota unlimited on USERS;
grant CONNECT,RESOURCE to HR;
grant CREATE VIEW to HR;
-- OS shell lines (not SQL): fetch the official HR sample-schema scripts.
wget https://raw.githubusercontent.com/oracle-samples/db-sample-schemas/main/human_resources/hr_cre.sql
wget https://raw.githubusercontent.com/oracle-samples/db-sample-schemas/main/human_resources/hr_popul.sql
connect HR/secret@bakura/NEREUS
spool install.txt
@hr_cre.sql
@hr_popul.sql
alter user HR no authentication;
-- Join-cardinality probe: EMPLOYEES x DEPARTMENTS on DEPARTMENT_ID.
select /*+ GATHER_PLAN_STATISTICS */
emp.FIRST_NAME
, emp.LAST_NAME
, dept.DEPARTMENT_NAME
from
HR.EMPLOYEES emp,
HR.DEPARTMENTS dept
where
emp.DEPARTMENT_ID = dept.DEPARTMENT_ID
order by
FIRST_NAME,
LAST_NAME
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
-- Drop stats, then regather with histograms (SKEWONLY, then a forced 254).
exec dbms_stats.delete_table_stats('HR','EMPLOYEES');
exec dbms_stats.delete_table_stats('HR','DEPARTMENTS');
alter system flush shared_pool;
exec dbms_stats.gather_table_stats('HR','EMPLOYEES', method_opt=>'for all columns size SKEWONLY');
exec dbms_stats.gather_table_stats('HR','DEPARTMENTS', method_opt=>'for all columns size SKEWONLY');
exec dbms_stats.gather_table_stats('HR','EMPLOYEES', method_opt=>'for all columns size 254');
exec dbms_stats.gather_table_stats('HR','DEPARTMENTS', method_opt=>'for all columns size 254');
select column_name,num_distinct,density,num_nulls,num_buckets,sample_size,histogram
from dba_tab_col_statistics
where owner='HR' and table_name='EMPLOYEES' and column_name='DEPARTMENT_ID';
select endpoint_value as column_value,
endpoint_number as cummulative_frequency,
endpoint_number - lag(endpoint_number,1,0) over (order by endpoint_number) as frequency
from dba_tab_histograms
where owner='HR' and table_name='EMPLOYEES' and column_name='DEPARTMENT_ID';
select column_name,num_distinct,density,num_nulls,num_buckets,sample_size,histogram
from dba_tab_col_statistics
where owner='HR' and table_name='DEPARTMENTS' and column_name='DEPARTMENT_ID';
select endpoint_value as column_value,
endpoint_number as cummulative_frequency,
endpoint_number - lag(endpoint_number,1,0) over (order by endpoint_number) as frequency
from dba_tab_histograms
where owner='HR' and table_name='DEPARTMENTS' and column_name='DEPARTMENT_ID';
-- Manual join-cardinality computation from the two frequency histograms:
-- for each value present in both, multiply the per-value frequencies and
-- sum the products (COMPUTE SUM shows the total).
break on report skip 1
compute sum of product on report
column product format 999,999,999
with f1 as (
select
endpoint_value value,
endpoint_number - lag(endpoint_number,1,0) over(order by endpoint_number) frequency
from
dba_tab_histograms
where
owner='HR'
and table_name = 'EMPLOYEES'
and column_name = 'DEPARTMENT_ID'
order by
endpoint_value
),
f2 as (
select
endpoint_value value,
endpoint_number - lag(endpoint_number,1,0) over(order by endpoint_number) frequency
from
dba_tab_histograms
where
owner='HR'
and table_name = 'DEPARTMENTS'
and column_name = 'DEPARTMENT_ID'
order by
endpoint_value
)
select
f1.value, f1.frequency, f2.frequency, f1.frequency * f2.frequency product
from
f1, f2
where
f2.value = f1.value
;

View File

@@ -0,0 +1,85 @@
-- Join-skew demo, small scale: T1 (20 rows; col1 in {0..3} for rows 1..9,
-- 999 for the rest) joined to T2 (100 rows; col1 in {0..9} for rows 1..24,
-- 999 for the rest).
drop table T1 purge;
create table T1 tablespace USERS as
select
rownum id,
case when rownum<10 then mod(rownum,4) else 999 end col1
from ( select 1 just_a_column
from DUAL
connect by level <= 20
)
/
drop table T2 purge;
create table T2 tablespace USERS as
select
rownum id,
case when rownum<25 then mod(rownum,10) else 999 end col1
from ( select 1 just_a_column
from DUAL
connect by level <= 100
)
/
-- Baseline stats without histograms.
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size 1');
exec dbms_stats.gather_table_stats(user,'T2', method_opt=>'for all columns size 1');
alter system flush shared_pool;
drop table Q purge;
-- Join with a single-value predicate that matches no row (col1=150 never
-- occurs); CTAS keeps the cursor available for display_cursor.
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
T1.ID id1
, T2.ID id2
, T1.COL1 val
from
T1,
T2
where
T1.COL1=150
and T1.COL1=T2.COL1
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size 1');
exec dbms_stats.gather_table_stats(user,'T2', method_opt=>'for all columns size 1');
-- Demo runs as SYS (see the @stats_col calls below): drop the stats, then
-- regather with histograms on skewed columns.
exec dbms_stats.delete_table_stats('SYS','T1');
exec dbms_stats.delete_table_stats('SYS','T2');
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size SKEWONLY');
exec dbms_stats.gather_table_stats(user,'T2', method_opt=>'for all columns size SKEWONLY');
alter system flush shared_pool;
-- Existing-value predicate this time (col1=3 occurs in both tables).
select /*+ GATHER_PLAN_STATISTICS */
T1.ID
, T2.ID
, T1.COL1
from
T1,
T2
where
T1.COL1=3
and T1.COL1=T2.COL1
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
-- Helper scripts: per-column stats, then cross frequency-histogram report.
@stats_col SYS T1 % % % %
@stats_col SYS T2 % % % %
-- Fix: T2 has no COL2 -- its join column is COL1 (see CTAS above).
@hist_cross_freq SYS T1 COL1 SYS T2 COL1

View File

@@ -0,0 +1,60 @@
-- Join-skew demo, large scale: T1 (5e5 rows; 4e4 of them spread over 500
-- values, the rest 999) joined to T2 (1e6 rows; 8e5 spread over 500 values,
-- the rest 999).
drop table T1 purge;
create table T1 tablespace USERS as
select
rownum id,
case when rownum<4e4 then mod(rownum,500) else 999 end col1
from ( select 1 just_a_column
from DUAL
connect by level <= 5e5
)
/
drop table T2 purge;
create table T2 tablespace USERS as
select
rownum id,
case when rownum<8e5 then mod(rownum,500) else 999 end col1
from ( select 1 just_a_column
from DUAL
connect by level <= 1e6
)
/
alter system flush shared_pool;
drop table Q purge;
-- First run is executed BEFORE any explicit stats gathering; here the
-- predicate value 150 does occur (mod(rownum,500)=150), unlike the
-- small-table variant of this demo.
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
T1.ID id1
, T2.ID id2
, T1.COL1 val
from
T1,
T2
where
T1.COL1=150
and T1.COL1=T2.COL1
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size 1');
exec dbms_stats.gather_table_stats(user,'T2', method_opt=>'for all columns size 1');
-- Demo runs as SYS (see the @stats_col calls below).
exec dbms_stats.delete_table_stats('SYS','T1');
exec dbms_stats.delete_table_stats('SYS','T2');
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size SKEWONLY');
exec dbms_stats.gather_table_stats(user,'T2', method_opt=>'for all columns size SKEWONLY');
@stats_col SYS T1 % % % %
@stats_col SYS T2 % % % %
-- Fix: T2 has no COL2 -- its join column is COL1 (see CTAS above).
@hist_cross_freq SYS T1 COL1 SYS T2 COL1

View File

@@ -0,0 +1,97 @@
https://hourim.wordpress.com/?s=histogram
https://jonathanlewis.wordpress.com/2013/10/09/12c-histograms-pt-3/
-- Hybrid-histogram cardinality arithmetic (see links above). Assumes a T1
-- populated by an earlier script is still in place.
exec dbms_stats.delete_table_stats('SYS','T1');
-- NOTE(review): the 'for all columns size 1' gather runs AFTER the size-20
-- gather and would wipe the col1 histogram again -- these two lines look
-- like they should be run in the opposite order (or selectively).
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for columns size 20 col1');
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size 1');
-- Raw histogram endpoints incl. ENDPOINT_REPEAT_COUNT (hybrid histogram).
select
endpoint_number,
endpoint_value,
endpoint_repeat_count
from
user_tab_histograms
where
table_name = 'T1'
order by
endpoint_number
;
set pages 50 lines 256
alter system flush shared_pool;
drop table Q purge;
-- Self-join without a single-value filter: baseline join cardinality.
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
a.COL1 COL1
from
T1 a,
T1 b
where
a.COL1=b.COL1
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
set pages 50 lines 256
alter system flush shared_pool;
drop table Q purge;
-- Filter on value 33 (a popular value per the accompanying notes).
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
a.COL1 COL1
from
T1 a,
T1 b
where
a.COL1=33 and
a.COL1=b.COL1
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
set pages 50 lines 256
alter system flush shared_pool;
drop table Q purge;
-- Filter on value 37 (a non-popular value per the accompanying notes).
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
a.COL1 COL1
from
T1 a,
T1 b
where
a.COL1=37 and
a.COL1=b.COL1
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
37 distinct values - 20 popular values = 17 non-popular values
Over 32 rows => 17 non-popular values (uniformly distributed) => ~2 rows / value
x 17

View File

@@ -0,0 +1,48 @@
-- Same experiment with a value outside the column's value range.
exec dbms_stats.delete_table_stats('SYS','T1');
-- NOTE(review): as in the previous script, the 'size 1' gather overwrites
-- the size-20 histogram just gathered on col1 -- the order looks inverted.
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for columns size 20 col1');
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size 1');
set pages 50 lines 256
alter system flush shared_pool;
drop table Q purge;
-- 9999 is presumably beyond col1's high value: out-of-range selectivity case.
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
a.COL1 COL1
from
T1 a,
T1 b
where
a.COL1=9999 and
a.COL1=b.COL1
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
density = (nr_of_lines/nr_of_distinct_values)/100 = frequency_of_column / 100
frequency_of_non_popular_values = (nr_of_lines-sum(endpoint repeat count)) / (number_of_distinct_values - number_of_endpoints)
32 LINES ---- 17 NON POP
?
Test: popular value
non-popular value
non-popular value out of range

138
histograms/histogram_08.txt Normal file
View File

@@ -0,0 +1,138 @@
-- Setup
--------
-- Join-cardinality demo WITHOUT histograms: T1 (20 rows) joined to T2
-- (100 rows) on col1; both tables are dominated by the value 999.
drop table T1 purge;
create table T1 tablespace USERS as
select
rownum id,
case when rownum<10 then mod(rownum,4) else 999 end col1
from ( select 1 just_a_column
from DUAL
connect by level <= 20
)
/
drop table T2 purge;
create table T2 tablespace USERS as
select
rownum id,
case when rownum<25 then mod(rownum,10) else 999 end col1
from ( select 1 just_a_column
from DUAL
connect by level <= 100
)
/
-- size 1: one bucket per column, no histograms.
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size 1');
exec dbms_stats.gather_table_stats(user,'T2', method_opt=>'for all columns size 1');
set lines 250 pages 999
alter system flush shared_pool;
drop table Q purge;
-- Plain equi-join on col1; CTAS keeps the cursor for display_cursor.
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
T1.ID id1
, T2.ID id2
, T1.COL1 val
from
T1,
T2
where
T1.COL1=T2.COL1
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
@stats_col SYS T1 % % % %
b s Avg Num
Object a e Col Buc
Type TableName ColumnName LastAnalyzed l r Size (MB) SampleSize Len NumDistinct NumNulls Density Histogram ket
-------- --------------------------------------------- ------------------------- ------------------ - - --------- ---------------- ---- --------------- -------------- ------------------ --------------- ----
TABLE SYS.T1 COL1 11-FEB-23 09:20:04 Y N 0 20 4 5 0 .200000000000000 NONE 1
TABLE SYS.T1 ID 11-FEB-23 09:20:04 Y N 0 20 3 20 0 .050000000000000 NONE 1
SQL> @stats_col SYS T2 % % % %
b s Avg Num
Object a e Col Buc
Type TableName ColumnName LastAnalyzed l r Size (MB) SampleSize Len NumDistinct NumNulls Density Histogram ket
-------- --------------------------------------------- ------------------------- ------------------ - - --------- ---------------- ---- --------------- -------------- ------------------ --------------- ----
TABLE SYS.T2 COL1 11-FEB-23 09:20:04 Y N 0 100 4 11 0 .090909090909091 NONE 1
TABLE SYS.T2 ID 11-FEB-23 09:20:04 Y N 0 100 3 100 0 .010000000000000 NONE 1
-------------------------------------------------------------------------------------------------------------------------------------------------
| Id | Operation | Name | Starts | E-Rows |E-Bytes| Cost (%CPU)| A-Rows | A-Time | Buffers | Writes | OMem | 1Mem | Used-Mem |
--------------------------------------------------------------------------------------------------------------------------------------------------
| 0 | CREATE TABLE STATEMENT | | 1 | | | 7 (100)| 0 |00:00:00.01 | 25 | 2 | | | |
| 1 | LOAD AS SELECT | Q | 1 | | | | 0 |00:00:00.01 | 25 | 2 | 1043K| 1043K| 1043K (0)|
|* 2 | HASH JOIN | | 1 | 182 | 2366 | 6 (0)| 861 |00:00:00.01 | 4 | 0 | 2078K| 2078K| 1219K (0)|
| 3 | TABLE ACCESS FULL | T1 | 1 | 20 | 120 | 3 (0)| 20 |00:00:00.01 | 2 | 0 | | | |
| 4 | TABLE ACCESS FULL | T2 | 1 | 100 | 700 | 3 (0)| 100 |00:00:00.01 | 2 | 0 | | | |
--------------------------------------------------------------------------------------------------------------------------------------------------
-- rows1*rows2/max(distinct1,distinct2) = rows1*rows2*min(density1,density2)
SQL> select 20*100*.090909090909091 from dual;
20*100*.090909090909091
-----------------------
181.818182
alter system flush shared_pool;
drop table Q purge;
-- Same join with the table order forced via LEADING(T2 T1); the cardinality
-- estimate should be unchanged, only the plan shape differs.
create table Q as
select /*+ GATHER_PLAN_STATISTICS LEADING(T2 T1) */
T1.ID id1
, T2.ID id2
, T1.COL1 val
from
T1,
T2
where
-- T1.COL1=150 and
T1.COL1=T2.COL1
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
@stats_col SYS T1 % % % %
b s Avg Num
Object a e Col Buc
Type TableName ColumnName LastAnalyzed l r Size (MB) SampleSize Len NumDistinct NumNulls Density Histogram ket
-------- --------------------------------------------- ------------------------- ------------------ - - --------- ---------------- ---- --------------- -------------- ------------------ --------------- ----
TABLE SYS.T1 COL1 11-FEB-23 09:20:04 Y N 0 20 4 5 0 .200000000000000 NONE 1
TABLE SYS.T1 ID 11-FEB-23 09:20:04 Y N 0 20 3 20 0 .050000000000000 NONE 1
SQL> @stats_col SYS T2 % % % %
b s Avg Num
Object a e Col Buc
Type TableName ColumnName LastAnalyzed l r Size (MB) SampleSize Len NumDistinct NumNulls Density Histogram ket
-------- --------------------------------------------- ------------------------- ------------------ - - --------- ---------------- ---- --------------- -------------- ------------------ --------------- ----
TABLE SYS.T2 COL1 11-FEB-23 09:20:04 Y N 0 100 4 11 0 .090909090909091 NONE 1
TABLE SYS.T2 ID 11-FEB-23 09:20:04 Y N 0 100 3 100 0 .010000000000000 NONE 1
--------------------------------------------------------------------------------------------------------------------------------------------------
| Id | Operation | Name | Starts | E-Rows |E-Bytes| Cost (%CPU)| A-Rows | A-Time | Buffers | Writes | OMem | 1Mem | Used-Mem |
--------------------------------------------------------------------------------------------------------------------------------------------------
| 0 | CREATE TABLE STATEMENT | | 1 | | | 7 (100)| 0 |00:00:00.01 | 24 | 2 | | | |
| 1 | LOAD AS SELECT | Q | 1 | | | | 0 |00:00:00.01 | 24 | 2 | 1043K| 1043K| 1043K (0)|
|* 2 | HASH JOIN | | 1 | 182 | 2366 | 6 (0)| 861 |00:00:00.01 | 4 | 0 | 2078K| 2078K| 1315K (0)|
| 3 | TABLE ACCESS FULL | T2 | 1 | 100 | 700 | 3 (0)| 100 |00:00:00.01 | 2 | 0 | | | |
| 4 | TABLE ACCESS FULL | T1 | 1 | 20 | 120 | 3 (0)| 20 |00:00:00.01 | 2 | 0 | | | |
--------------------------------------------------------------------------------------------------------------------------------------------------
-- rows1*rows2/max(distinct1,distinct2) = rows1*rows2*min(density1,density2)
SQL> select 100*20*.090909090909091 from dual;
100*20*.090909090909091
-----------------------
181.818182

View File

@@ -0,0 +1,83 @@
-- Setup
--------
-- Variant: join T1.COL1 to T2.ID (T2's row-number column, all values
-- distinct) instead of T2.COL1, and compare the resulting estimate.
drop table T1 purge;
create table T1 tablespace USERS as
select
rownum id,
case when rownum<10 then mod(rownum,4) else 999 end col1
from ( select 1 just_a_column
from DUAL
connect by level <= 20
)
/
drop table T2 purge;
create table T2 tablespace USERS as
select
rownum id,
case when rownum<25 then mod(rownum,10) else 999 end col1
from ( select 1 just_a_column
from DUAL
connect by level <= 100
)
/
-- size 1: no histograms.
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size 1');
exec dbms_stats.gather_table_stats(user,'T2', method_opt=>'for all columns size 1');
set lines 250 pages 999
alter system flush shared_pool;
drop table Q purge;
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
T1.ID id1
, T2.ID id2
, T1.COL1 val
from
T1,
T2
where
T1.COL1=T2.ID
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
@stats_col SYS T1 % % % %
b s Avg Num
Object a e Col Buc
Type TableName ColumnName LastAnalyzed l r Size (MB) SampleSize Len NumDistinct NumNulls Density Histogram ket
-------- --------------------------------------------- ------------------------- ------------------ - - --------- ---------------- ---- --------------- -------------- ------------------ --------------- ----
TABLE SYS.T1 COL1 11-FEB-23 09:20:04 Y N 0 20 4 5 0 .200000000000000 NONE 1
TABLE SYS.T1 ID 11-FEB-23 09:20:04 Y N 0 20 3 20 0 .050000000000000 NONE 1
SQL> @stats_col SYS T2 % % % %
b s Avg Num
Object a e Col Buc
Type TableName ColumnName LastAnalyzed l r Size (MB) SampleSize Len NumDistinct NumNulls Density Histogram ket
-------- --------------------------------------------- ------------------------- ------------------ - - --------- ---------------- ---- --------------- -------------- ------------------ --------------- ----
TABLE SYS.T2 COL1 11-FEB-23 09:20:04 Y N 0 100 4 11 0 .090909090909091 NONE 1
TABLE SYS.T2 ID 11-FEB-23 09:20:04 Y N 0 100 3 100 0 .010000000000000 NONE 1
--------------------------------------------------------------------------------------------------------------------------------------------------
| Id | Operation | Name | Starts | E-Rows |E-Bytes| Cost (%CPU)| A-Rows | A-Time | Buffers | Writes | OMem | 1Mem | Used-Mem |
--------------------------------------------------------------------------------------------------------------------------------------------------
| 0 | CREATE TABLE STATEMENT | | 1 | | | 7 (100)| 0 |00:00:00.01 | 24 | 1 | | | |
| 1 | LOAD AS SELECT | Q | 1 | | | | 0 |00:00:00.01 | 24 | 1 | 1043K| 1043K| 1043K (0)|
|* 2 | HASH JOIN | | 1 | 20 | 180 | 6 (0)| 7 |00:00:00.01 | 4 | 0 | 2078K| 2078K| 1219K (0)|
| 3 | TABLE ACCESS FULL | T1 | 1 | 20 | 120 | 3 (0)| 20 |00:00:00.01 | 2 | 0 | | | |
| 4 | TABLE ACCESS FULL | T2 | 1 | 100 | 300 | 3 (0)| 100 |00:00:00.01 | 2 | 0 | | | |
--------------------------------------------------------------------------------------------------------------------------------------------------
-- rows1*rows2/max(distinct1,distinct2) = rows1*rows2*min(density1,density2)
SQL> select 20*100*.010000000000000 from dual;
20*100*.010000000000000
-----------------------
20

179
histograms/histogram_10.txt Normal file
View File

@@ -0,0 +1,179 @@
-- Setup
--------
-- histogram_10: two-column join demos. Rows are generated procedurally so
-- each column mixes a skewed region with a per-row (distinct) region.
drop table T1 purge;
create table T1(
id NUMBER not null,
col1 NUMBER,
col2 NUMBER
)
tablespace USERS;
-- T1, 40 rows:
--   col1: rows 1..15 -> mod(i,3) (3 skewed values); rows 16..40 -> i (distinct)
--   col2: rows 1..30 -> mod(i,6) (6 skewed values); rows 31..40 -> 999
declare
v_id NUMBER;
v_col1 NUMBER;
v_col2 NUMBER;
begin
for i IN 1..40 loop
-- id column
v_id:=i;
-- col1 column
if (i between 1 and 15) then v_col1:=mod(i,3); end if;
if (i between 16 and 40) then v_col1:=i; end if;
-- col2 column
if (i between 1 and 30) then v_col2:=mod(i,6); end if;
if (i between 31 and 40) then v_col2:=999; end if;
-- insert values (column list omitted; order matches the DDL above)
insert into T1 values (v_id,v_col1,v_col2);
end loop;
commit;
end;
/
drop table T2 purge;
create table T2(
id NUMBER not null,
col1 NUMBER,
col2 NUMBER
)
tablespace USERS;
-- T2, 150 rows:
--   col1: rows 1..49 -> mod(i,7); rows 50..100 -> i; rows 101..150 -> 777
--   col2: rows 1..100 -> mod(i,10); rows 101..140 -> 999; rows 141..150 -> i
declare
v_id NUMBER;
v_col1 NUMBER;
v_col2 NUMBER;
begin
for i IN 1..150 loop
-- id column
v_id:=i;
-- col1 column
if (i between 1 and 49) then v_col1:=mod(i,7); end if;
if (i between 50 and 100) then v_col1:=i; end if;
if (i between 101 and 150) then v_col1:=777; end if;
-- col2 column
if (i between 1 and 100) then v_col2:=mod(i,10); end if;
if (i between 101 and 140) then v_col2:=999; end if;
if (i between 141 and 150) then v_col2:=i; end if;
-- insert values (column list omitted; order matches the DDL above)
insert into T2 values (v_id,v_col1,v_col2);
end loop;
commit;
end;
/
-- No histograms for the baseline runs.
exec dbms_stats.gather_table_stats(user,'T1', method_opt=>'for all columns size 1');
exec dbms_stats.gather_table_stats(user,'T2', method_opt=>'for all columns size 1');
set lines 250 pages 999
alter system flush shared_pool;
drop table Q purge;
-- Single-column join on col1.
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
T1.ID id1
, T2.ID id2
, T1.COL1 val
from
T1,
T2
where
T1.COL1=T2.COL1
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
set lines 250 pages 999
alter system flush shared_pool;
drop table Q purge;
-- Single-column join on col2.
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
T1.ID id1
, T2.ID id2
, T1.COL1 val
from
T1,
T2
where
T1.COL2=T2.COL2
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
---------------------------------------------------------
set lines 250 pages 999
alter system flush shared_pool;
drop table Q purge;
-- Two-column conjunctive (AND) join.
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
T1.ID id1
, T2.ID id2
, T1.COL1 val
from
T1,
T2
where
T1.COL1=T2.COL1 and
T1.COL2=T2.COL2
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
set lines 250 pages 999
alter system flush shared_pool;
drop table Q purge;
-- Two-column disjunctive (OR) join.
create table Q as
select /*+ GATHER_PLAN_STATISTICS */
T1.ID id1
, T2.ID id2
, T1.COL1 val
from
T1,
T2
where
T1.COL1=T2.COL1 or
T1.COL2=T2.COL2
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES'));
--------------------------------------------------------
set lines 250 pages 999
alter system flush shared_pool;
drop table Q purge;
-- Range predicate on T2, MONITOR hint feeds the SQL Monitor reports below.
create table Q as
select /*+ GATHER_PLAN_STATISTICS MONITOR */
*
from
T2
where
COL1>=7
/
select * from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +PEEKED_BINDS +PARALLEL +PARTITION +COST +BYTES +NOTE'));
set pages 0 linesize 32767 trimspool on trim on long 1000000 longchunksize 10000000
-- Text SQL Monitor / report-repository output for the monitored statement.
-- NOTE(review): sql_id 'cgud94u0jkhjj' and report_id 145 were captured from
-- one specific run -- substitute current values when re-running.
select dbms_perf.report_sql(sql_id=>'cgud94u0jkhjj',outer_start_time=>sysdate-1, outer_end_time=>sysdate, selected_start_time=>sysdate-1, selected_end_time=>sysdate,type=>'TEXT') from dual;
SELECT report_id,PERIOD_START_TIME,PERIOD_END_TIME,GENERATION_TIME FROM dba_hist_reports WHERE component_name = 'sqlmonitor' AND (period_start_time BETWEEN sysdate-1 and sysdate )AND key1 = 'cgud94u0jkhjj';
set pages 0 linesize 32767 trimspool on trim on long 1000000 longchunksize 10000000
SELECT DBMS_AUTO_REPORT.REPORT_REPOSITORY_DETAIL(RID => 145, TYPE => 'text') FROM dual;

View File

@@ -0,0 +1,109 @@
-- Fixture: 100-row single-column table T1; col1 values range from 8 to 59
-- with many ties (used by the histogram self-join / cross-join demos).
drop table T1 purge;
create table T1 (col1 NUMBER) tablespace USERS;
insert into T1 values (8);
insert into T1 values (12);
insert into T1 values (12);
insert into T1 values (13);
insert into T1 values (13);
insert into T1 values (13);
insert into T1 values (15);
insert into T1 values (16);
insert into T1 values (16);
insert into T1 values (17);
insert into T1 values (18);
insert into T1 values (18);
insert into T1 values (19);
insert into T1 values (19);
insert into T1 values (19);
insert into T1 values (20);
insert into T1 values (20);
insert into T1 values (20);
insert into T1 values (20);
insert into T1 values (20);
insert into T1 values (21);
insert into T1 values (22);
insert into T1 values (22);
insert into T1 values (22);
insert into T1 values (23);
insert into T1 values (23);
insert into T1 values (24);
insert into T1 values (24);
insert into T1 values (25);
insert into T1 values (26);
insert into T1 values (26);
insert into T1 values (26);
insert into T1 values (27);
insert into T1 values (27);
insert into T1 values (27);
insert into T1 values (27);
insert into T1 values (27);
insert into T1 values (27);
insert into T1 values (28);
insert into T1 values (28);
insert into T1 values (28);
insert into T1 values (28);
insert into T1 values (28);
insert into T1 values (28);
insert into T1 values (29);
insert into T1 values (29);
insert into T1 values (29);
insert into T1 values (29);
insert into T1 values (29);
insert into T1 values (29);
insert into T1 values (30);
insert into T1 values (30);
insert into T1 values (30);
insert into T1 values (31);
insert into T1 values (31);
insert into T1 values (31);
insert into T1 values (31);
insert into T1 values (31);
insert into T1 values (32);
insert into T1 values (32);
insert into T1 values (32);
insert into T1 values (33);
insert into T1 values (33);
insert into T1 values (33);
insert into T1 values (33);
insert into T1 values (33);
insert into T1 values (33);
insert into T1 values (33);
insert into T1 values (33);
insert into T1 values (34);
insert into T1 values (34);
insert into T1 values (34);
insert into T1 values (35);
insert into T1 values (35);
insert into T1 values (35);
insert into T1 values (35);
insert into T1 values (35);
insert into T1 values (35);
insert into T1 values (35);
insert into T1 values (36);
insert into T1 values (37);
insert into T1 values (38);
insert into T1 values (38);
insert into T1 values (38);
insert into T1 values (38);
insert into T1 values (38);
insert into T1 values (39);
insert into T1 values (39);
insert into T1 values (40);
insert into T1 values (41);
insert into T1 values (42);
insert into T1 values (42);
insert into T1 values (43);
insert into T1 values (43);
insert into T1 values (43);
insert into T1 values (44);
insert into T1 values (45);
insert into T1 values (46);
insert into T1 values (50);
insert into T1 values (59);
commit;

View File

@@ -0,0 +1,109 @@
-- Fixture: 100-row single-column table T2; col1 values range from 8 to 59,
-- deliberately overlapping T1's distribution but with shifted popular
-- values (e.g. extra 22s and 55s), for the cross-histogram join demos.
drop table T2 purge;
create table T2 (col1 NUMBER) tablespace USERS;
insert into T2 values (8);
insert into T2 values (12);
insert into T2 values (12);
insert into T2 values (22);
insert into T2 values (22);
insert into T2 values (22);
insert into T2 values (15);
insert into T2 values (16);
insert into T2 values (16);
insert into T2 values (17);
insert into T2 values (18);
insert into T2 values (18);
insert into T2 values (19);
insert into T2 values (19);
insert into T2 values (19);
insert into T2 values (20);
insert into T2 values (20);
insert into T2 values (20);
insert into T2 values (20);
insert into T2 values (20);
insert into T2 values (21);
insert into T2 values (22);
insert into T2 values (22);
insert into T2 values (22);
insert into T2 values (23);
insert into T2 values (23);
insert into T2 values (25);
insert into T2 values (25);
insert into T2 values (25);
insert into T2 values (26);
insert into T2 values (26);
insert into T2 values (26);
insert into T2 values (55);
insert into T2 values (55);
insert into T2 values (55);
insert into T2 values (55);
insert into T2 values (55);
insert into T2 values (55);
insert into T2 values (28);
insert into T2 values (28);
insert into T2 values (28);
insert into T2 values (28);
insert into T2 values (28);
insert into T2 values (28);
insert into T2 values (29);
insert into T2 values (29);
insert into T2 values (29);
insert into T2 values (29);
insert into T2 values (29);
insert into T2 values (29);
insert into T2 values (30);
insert into T2 values (30);
insert into T2 values (30);
insert into T2 values (31);
insert into T2 values (31);
insert into T2 values (31);
insert into T2 values (31);
insert into T2 values (31);
insert into T2 values (32);
insert into T2 values (32);
insert into T2 values (32);
insert into T2 values (33);
insert into T2 values (33);
insert into T2 values (33);
insert into T2 values (33);
insert into T2 values (33);
insert into T2 values (33);
insert into T2 values (33);
insert into T2 values (33);
insert into T2 values (35);
insert into T2 values (35);
insert into T2 values (35);
insert into T2 values (35);
insert into T2 values (35);
insert into T2 values (35);
insert into T2 values (35);
insert into T2 values (35);
insert into T2 values (35);
insert into T2 values (35);
insert into T2 values (36);
insert into T2 values (37);
insert into T2 values (38);
insert into T2 values (38);
insert into T2 values (38);
insert into T2 values (38);
insert into T2 values (38);
insert into T2 values (39);
insert into T2 values (39);
insert into T2 values (50);
insert into T2 values (51);
insert into T2 values (52);
insert into T2 values (52);
insert into T2 values (53);
insert into T2 values (53);
insert into T2 values (53);
insert into T2 values (55);
insert into T2 values (55);
insert into T2 values (56);
insert into T2 values (50);
insert into T2 values (59);
commit;