2026-03-12 20:23:15

This commit is contained in:
root
2026-03-12 21:23:47 +01:00
parent eab4b36eca
commit 93039b8489
3332 changed files with 699614 additions and 0 deletions

5
vdh/.gitattributes vendored Normal file
View File

@@ -0,0 +1,5 @@
* text=auto
*.txt text
*.sql text
*.sh text eol=lf
*.py text

133
vdh/Edit.000 Normal file
View File

@@ -0,0 +1,133 @@
sys@GLIMS> r
1* select event, wait_class, sql_id from v$session where sid = 134
EVENT WAIT_CLASS
---------------------------------------------------------------- ----------------------------------------------------------------
SQL_ID
-------------
db file sequential read User I/O
a87bp8vnpnckt
sys@GLIMS> r
1* select event, wait_class, sql_id from v$session where sid = 134
EVENT WAIT_CLASS
---------------------------------------------------------------- ----------------------------------------------------------------
SQL_ID
-------------
db file sequential read User I/O
a87bp8vnpnckt
sys@GLIMS> select * from table(dbms_xplan.display_cursor('a87bp8vnpnckt'));
PLAN_TABLE_OUTPUT
----------------------------------------------------------------------------------------------------------------------------------
SQL_ID a87bp8vnpnckt, child number 0
-------------------------------------
SELECT /*+ INDEX_ASC(T1 RSLT_ID) */ T0.RSTI_ID
unique_id_0,T0.RSTI_ACTION,T0.RSTI_RESULT,T1.RSLT_ID
unique_id_1,T1.RSLT_ID,T1.RSLT_OBJECT,T1.RSLT_OBJECTTIME,T1.RSLT_SPECIME
N,T1.RSLT_PROPERTY,T1.RSLT_PROPERTYOUTPUT,T1.RSLT_QCPOPULATION,T1.RSLT_R
AWVALUE,T1.RSLT_RAWUNIT,T1.RSLT_COMPARATOR,T1.RSLT_MANTISSA,T1.RSLT_EXPO
NENT,T1.RSLT_PROPERTYCHOICE,T1.RSLT_SEVERITY,T1.RSLT_STATUS,T1.RSLT_UNSO
LICITED,T1.RSLT_PHONESTATUS,T1.RSLT_NEEDSMANUALVALIDATION,T1.RSLT_NORM,T
1.RSLT_ALTERNATEUNITNORM,T1.RSLT_ORIGIN,T1.RSLT_CHANNELQUALITY,T1.RSLT_N
ORMSEVERITY,T1.RSLT_DELTANORMSEVERITY,T1.RSLT_QCSEVERITY,T1.RSLT_STATION
SEVERITY,T1.RSLT_MANUALSEVERITY,T1.RSLT_EXTERNALCOMMENT,T1.RSLT_INTERNAL
COMMENT,T1.RSLT_AVAILABILITYUSER,T1.RSLT_AVAILABILITYTIME,T1.RSLT_CONFIR
MATIONUSER,T1.RSLT_CONFIRMATIONTIME,T1.RSLT_VALIDATIONUSER,T1.RSLT_VALID
ATIONTIME,T1.RSLT_REJECTIONUSER,T1.RSLT_REJECTIONTIME,T1.RSLT_REJECTIONR
EASON,T1.RSLT_PREDECESSOR,T1.RSLT_ASSESSMENTCOUNT,T1.RSLT_CREATIONUSER,T
1.RSLT_CREATIONTIME,T1.RSLT_LASTUPDA
Plan hash value: 442047200
-----------------------------------------------------------------------------------------------
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |
-----------------------------------------------------------------------------------------------
| 0 | SELECT STATEMENT | | | | 11 (100)| |
| 1 | SORT ORDER BY | | 3 | 987 | 11 (10)| 00:00:01 |
|* 2 | FILTER | | | | | |
| 3 | NESTED LOOPS | | | | | |
| 4 | NESTED LOOPS | | 3 | 987 | 10 (0)| 00:00:01 |
| 5 | TABLE ACCESS BY INDEX ROWID| RESULTINPUT | 3 | 54 | 4 (0)| 00:00:01 |
|* 6 | INDEX RANGE SCAN | RSTI_ACTION | 3 | | 3 (0)| 00:00:01 |
|* 7 | INDEX UNIQUE SCAN | RSLT_ID | 1 | | 1 (0)| 00:00:01 |
|* 8 | TABLE ACCESS BY INDEX ROWID | RESULT | 1 | 311 | 2 (0)| 00:00:01 |
-----------------------------------------------------------------------------------------------
Predicate Information (identified by operation id):
---------------------------------------------------
2 - filter(TO_NUMBER(:2)<=TO_NUMBER(:3))
6 - access("T0"."RSTI_ACTION"=TO_NUMBER(:1))
7 - access("T0"."RSTI_RESULT"="T1"."RSLT_ID")
8 - filter(("T1"."RSLT_PROPERTY"=TO_NUMBER(:4) AND "T1"."RSLT_COMPARATOR" IS NULL
AND "T1"."RSLT_STATUS"<=TO_NUMBER(:3) AND "T1"."RSLT_STATUS">=TO_NUMBER(:2)))
43 rows selected.
sys@GLIMS> select sql_fulltext from v$sql where sql_id = 'a87bp8vnpnckt' and rownum = 1;
SQL_FULLTEXT
--------------------------------------------------------------------------------
SELECT /*+ INDEX_ASC(T1 RSLT_ID) */ T0.RSTI_ID unique_id_0,T0.RSTI_ACTION,T0.RST
I_RESULT,T1.RSLT_ID unique_id_1,T1.RSLT_ID,T1.RSLT_OBJECT,T1.RSLT_OBJECTTIME,T1.
RSLT_SPECIMEN,T1.RSLT_PROPERTY,T1.RSLT_PROPERTYOUTPUT,T1.RSLT_QCPOPULATION,T1.RS
LT_RAWVALUE,T1.RSLT_RAWUNIT,T1.RSLT_COMPARATOR,T1.RSLT_MANTISSA,T1.RSLT_EXPONENT
,T1.RSLT_PROPERTYCHOICE,T1.RSLT_SEVERITY,T1.RSLT_STATUS,T1.RSLT_UNSOLICITED,T1.R
SLT_PHONESTATUS,T1.RSLT_NEEDSMANUALVALIDATION,T1.RSLT_NORM,T1.RSLT_ALTERNATEUNIT
NORM,T1.RSLT_ORIGIN,T1.RSLT_CHANNELQUALITY,T1.RSLT_NORMSEVERITY,T1.RSLT_DELTANOR
MSEVERITY,T1.RSLT_QCSEVERITY,T1.RSLT_STATIONSEVERITY,T1.RSLT_MANUALSEVERITY,T1.R
SLT_EXTERNALCOMMENT,T1.RSLT_INTERNALCOMMENT,T1.RSLT_AVAILABILITYUSER,T1.RSLT_AVA
ILABILITYTIME,T1.RSLT_CONFIRMATIONUSER,T1.RSLT_CONFIRMATIONTIME,T1.RSLT_VALIDATI
ONUSER,T1.RSLT_VALIDATIONTIME,T1.RSLT_REJECTIONUSER,T1.RSLT_REJECTIONTIME,T1.RSL
T_REJECTIONREASON,T1.RSLT_PREDECESSOR,T1.RSLT_ASSESSMENTCOUNT,T1.RSLT_CREATIONUS
ER,T1.RSLT_CREATIONTIME,T1.RSLT_LASTUPDATEUSER,T1.RSLT_LASTUPDATETIME,T1.RSLT_VE
RSION,T1.RSLT_CONFIRMATIONMETHOD,T1.RSLT_VALIDATIONMETHOD,T1.RSLT_ORDER,T1.RSLT_
HASDUPLICATE,T1.RSLT_URGENCY,T1.RSLT_HASREQUESTCOMMENT,T1.RSLT_ORIGINAL,T1.RSLT_
ISDUPLICATE,T1.RSLT_LOG,T1.RSLT_FIRSTREPORTTIME,T1.RSLT_LASTREPORTTIME,T1.RSLT_D
EPARTMENT,T1.RSLT_MODIFICATIONMETHOD,T1.RSLT_NEEDSMANUALCONFIRM,T1.RSLT_REFERENC
ETIME,T1.RSLT_DYNAMICTYPE,T1.RSLT_RESTRICTEDVALIDATOR,T1.RSLT_EXPERTSYSTEMFLAGS,
T1.RSLT_SHIFT,T1.RSLT_PHONEMARKMETHOD,T1.RSLT_METHOD,T1.RSLT_REQDILUTIONFACTOR,T
1.RSLT_REQDILUTIONCONDITION,T1.RSLT_DILUTIONMODE,T1.RSLT_EFFDILUTIONCONDITION,T1
.RSLT_EFFDILUTIONFACTOR,T1.RSLT_EXTERNALCOMMENTTIME FROM ORAGLIMS.RESULTINPUT T
0,ORAGLIMS.RESULT T1 WHERE ((T0.RSTI_ACTION = :1 AND T0.RSTI_RESULT = T1.RSLT_I
D AND T1.RSLT_STATUS >= :2 AND T1.RSLT_STATUS <= :3 AND T1.RSLT_PROPERTY = :4 AN
D T1.RSLT_COMPARATOR IS NULL )) ORDER BY T0.RSTI_ACTION, T1.RSLT_ID
-- Readable reformat of SQL_ID a87bp8vnpnckt captured in the transcript above:
-- fetch RESULT rows (plus their driving RESULTINPUT action row) for a single
-- property within a status range.  The INDEX_ASC hint asks the optimizer to
-- probe RESULT through the RSLT_ID index in ascending order.
select /*+ INDEX_ASC(t1 rslt_id) */
t0.rsti_id unique_id_0,t0.rsti_action,t0.rsti_result,t1.rslt_id unique_id_1,
t1.rslt_id,t1.rslt_object,t1.rslt_objecttime,t1.rslt_specimen,t1.rslt_property,t1.
rslt_propertyoutput,t1.rslt_qcpopulation,t1.rslt_rawvalue,t1.rslt_rawunit,
t1.rslt_comparator,t1.rslt_mantissa,t1.rslt_exponent,t1.rslt_propertychoice,
t1.rslt_severity,t1.rslt_status,t1.rslt_unsolicited,t1.rslt_phonestatus,
t1.rslt_needsmanualvalidation,t1.rslt_norm,t1.rslt_alternateunitnorm,t1.rslt_origin,
t1.rslt_channelquality,t1.rslt_normseverity,t1.rslt_deltanormseverity,t1.rslt_qcseverity,
t1.rslt_stationseverity,t1.rslt_manualseverity,t1.rslt_externalcomment,
t1.rslt_internalcomment,t1.rslt_availabilityuser,t1.rslt_availabilitytime,
t1.rslt_confirmationuser,t1.rslt_confirmationtime,t1.rslt_validationuser,
t1.rslt_validationtime,t1.rslt_rejectionuser,t1.rslt_rejectiontime,t1.rslt_rejectionreason,
t1.rslt_predecessor,t1.rslt_assessmentcount,t1.rslt_creationuser,t1.rslt_creationtime,
t1.rslt_lastupdateuser,t1.rslt_lastupdatetime,t1.rslt_version,t1.rslt_confirmationmethod,
t1.rslt_validationmethod,t1.rslt_order,t1.rslt_hasduplicate,t1.rslt_urgency,
t1.rslt_hasrequestcomment,t1.rslt_original,t1.rslt_isduplicate,t1.rslt_log,
t1.rslt_firstreporttime,t1.rslt_lastreporttime,t1.rslt_department,t1.rslt_modificationmethod,
t1.rslt_needsmanualconfirm,t1.rslt_referencetime,t1.rslt_dynamictype,t1.rslt_restrictedvalidator,
t1.rslt_expertsystemflags,t1.rslt_shift,t1.rslt_phonemarkmethod,t1.rslt_method,
t1.rslt_reqdilutionfactor,t1.rslt_reqdilutioncondition,t1.rslt_dilutionmode,
t1.rslt_effdilutioncondition,t1.rslt_effdilutionfactor,t1.rslt_externalcommenttime
-- comma join: rsti_result = rslt_id is the join condition; the binds (:1..:4)
-- arrive as character values, hence the TO_NUMBER conversions seen in the plan
from oraglims.resultinput t0,oraglims.result t1
where (( t0.rsti_action = :1
and t0.rsti_result = t1.rslt_id
and t1.rslt_status >= :2
and t1.rslt_status <= :3
and t1.rslt_property = :4
and t1.rslt_comparator is null
)
)
order by t0.rsti_action, t1.rslt_id

26
vdh/Edit.001 Normal file
View File

@@ -0,0 +1,26 @@
PARSING IN CURSOR #47505495601448 len=1907 dep=0 uid=70 oct=3 lid=70 tim=1303377501916730 hv=3915002457 ad='c9fda7d8' sqlid='a87bp8vnpnckt'
SELECT /*+ INDEX_ASC(T1 RSLT_ID) */ T0.RSTI_ID unique_id_0,T0.RSTI_ACTION,T0.RSTI_RESULT,T1.RSLT_ID unique_id_1,T1.RSLT_ID,T1.RSLT_OBJECT,T1.RSLT_OBJECTTIME,T1.RSLT_SPECIMEN,T1.RSLT_PROPERTY,T1.RSLT_PROPERTYOUTPUT,T1.RSLT_QCPOPULATION,T1.RSLT_RAWVALUE,T1.RSLT_RAWUNIT,T1.RSLT_COMPARATOR,T1.RSLT_MANTISSA,T1.RSLT_EXPONENT,T1.RSLT_PROPERTYCHOICE,T1.RSLT_SEVERITY,T1.RSLT_STATUS,T1.RSLT_UNSOLICITED,T1.RSLT_PHONESTATUS,T1.RSLT_NEEDSMANUALVALIDATION,T1.RSLT_NORM,T1.RSLT_ALTERNATEUNITNORM,T1.RSLT_ORIGIN,T1.RSLT_CHANNELQUALITY,T1.RSLT_NORMSEVERITY,T1.RSLT_DELTANORMSEVERITY,T1.RSLT_QCSEVERITY,T1.RSLT_STATIONSEVERITY,T1.RSLT_MANUALSEVERITY,T1.RSLT_EXTERNALCOMMENT,T1.RSLT_INTERNALCOMMENT,T1.RSLT_AVAILABILITYUSER,T1.RSLT_AVAILABILITYTIME,T1.RSLT_CONFIRMATIONUSER,T1.RSLT_CONFIRMATIONTIME,T1.RSLT_VALIDATIONUSER,T1.RSLT_VALIDATIONTIME,T1.RSLT_REJECTIONUSER,T1.RSLT_REJECTIONTIME,T1.RSLT_REJECTIONREASON,T1.RSLT_PREDECESSOR,T1.RSLT_ASSESSMENTCOUNT,T1.RSLT_CREATIONUSER,T1.RSLT_CREATIONTIME,T1.RSLT_LASTUPDATEUSER,T1.RSLT_LASTUPDATETIME,T1.RSLT_VERSION,T1.RSLT_CONFIRMATIONMETHOD,T1.RSLT_VALIDATIONMETHOD,T1.RSLT_ORDER,T1.RSLT_HASDUPLICATE,T1.RSLT_URGENCY,T1.RSLT_HASREQUESTCOMMENT,T1.RSLT_ORIGINAL,T1.RSLT_ISDUPLICATE,T1.RSLT_LOG,T1.RSLT_FIRSTREPORTTIME,T1.RSLT_LASTREPORTTIME,T1.RSLT_DEPARTMENT,T1.RSLT_MODIFICATIONMETHOD,T1.RSLT_NEEDSMANUALCONFIRM,T1.RSLT_REFERENCETIME,T1.RSLT_DYNAMICTYPE,T1.RSLT_RESTRICTEDVALIDATOR,T1.RSLT_EXPERTSYSTEMFLAGS,T1.RSLT_SHIFT,T1.RSLT_PHONEMARKMETHOD,T1.RSLT_METHOD,T1.RSLT_REQDILUTIONFACTOR,T1.RSLT_REQDILUTIONCONDITION,T1.RSLT_DILUTIONMODE,T1.RSLT_EFFDILUTIONCONDITION,T1.RSLT_EFFDILUTIONFACTOR,T1.RSLT_EXTERNALCOMMENTTIME FROM ORAGLIMS.RESULTINPUT T0,ORAGLIMS.RESULT T1 WHERE ((T0.RSTI_ACTION = :1 AND T0.RSTI_RESULT = T1.RSLT_ID AND T1.RSLT_STATUS >= :2 AND T1.RSLT_STATUS <= :3 AND T1.RSLT_PROPERTY = :4 AND T1.RSLT_COMPARATOR IS NULL )) ORDER BY T0.RSTI_ACTION, T1.RSLT_ID
END OF STMT
PARSE #47505495601448:c=0,e=388,p=0,cr=0,cu=0,mis=1,r=0,dep=0,og=2,plh=0,tim=1303377501916729
BINDS #47505495601448:
Bind#0
oacdty=01 mxl=32(31) mxlc=00 mal=00 scl=00 pre=00
oacflg=01 fl2=1000000 frm=01 csi=178 siz=128 off=0
kxsbbbfp=2b34bc308a88 bln=32 avl=08 flg=05
value="21087656"
Bind#1
oacdty=01 mxl=32(31) mxlc=00 mal=00 scl=00 pre=00
oacflg=01 fl2=1000000 frm=01 csi=178 siz=0 off=32
kxsbbbfp=2b34bc308aa8 bln=32 avl=01 flg=01
value="3"
Bind#2
oacdty=01 mxl=32(31) mxlc=00 mal=00 scl=00 pre=00
oacflg=01 fl2=1000000 frm=01 csi=178 siz=0 off=64
kxsbbbfp=2b34bc308ac8 bln=32 avl=01 flg=01
value="5"
Bind#3
oacdty=01 mxl=32(31) mxlc=00 mal=00 scl=00 pre=00
oacflg=01 fl2=1000000 frm=01 csi=178 siz=0 off=96
kxsbbbfp=2b34bc308ae8 bln=32 avl=04 flg=01
value="8997"
EXEC #47505495601448:c=4000,e=3046,p=0,cr=0,cu=0,mis=1,r=0,dep=0,og=2,plh=2844530747,tim=1303377501919822

65
vdh/Edit.002 Normal file
View File

@@ -0,0 +1,65 @@
--MET HINT
PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------
Plan hash value: 442047200
-----------------------------------------------------------------------------------------------
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |
-----------------------------------------------------------------------------------------------
| 0 | SELECT STATEMENT | | 3 | 987 | 11 (10)| 00:00:01 |
| 1 | SORT ORDER BY | | 3 | 987 | 11 (10)| 00:00:01 |
|* 2 | FILTER | | | | | |
| 3 | NESTED LOOPS | | | | | |
| 4 | NESTED LOOPS | | 3 | 987 | 10 (0)| 00:00:01 |
| 5 | TABLE ACCESS BY INDEX ROWID| RESULTINPUT | 3 | 54 | 4 (0)| 00:00:01 |
|* 6 | INDEX RANGE SCAN | RSTI_ACTION | 3 | | 3 (0)| 00:00:01 |
|* 7 | INDEX UNIQUE SCAN | RSLT_ID | 1 | | 1 (0)| 00:00:01 |
|* 8 | TABLE ACCESS BY INDEX ROWID | RESULT | 1 | 311 | 2 (0)| 00:00:01 |
-----------------------------------------------------------------------------------------------
Predicate Information (identified by operation id):
---------------------------------------------------
2 - filter(TO_NUMBER(:2)<=TO_NUMBER(:3))
6 - access("T0"."RSTI_ACTION"=TO_NUMBER(:1))
7 - access("T0"."RSTI_RESULT"="T1"."RSLT_ID")
8 - filter("T1"."RSLT_PROPERTY"=TO_NUMBER(:4) AND "T1"."RSLT_STATUS">=TO_NUMBER(:2)
AND "T1"."RSLT_STATUS"<=TO_NUMBER(:3) AND "T1"."RSLT_COMPARATOR" IS NULL)
24 rows selected.
--ZONDER HINT
PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------
Plan hash value: 442047200
-----------------------------------------------------------------------------------------------
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |
-----------------------------------------------------------------------------------------------
| 0 | SELECT STATEMENT | | 3 | 987 | 11 (10)| 00:00:01 |
| 1 | SORT ORDER BY | | 3 | 987 | 11 (10)| 00:00:01 |
|* 2 | FILTER | | | | | |
| 3 | NESTED LOOPS | | | | | |
| 4 | NESTED LOOPS | | 3 | 987 | 10 (0)| 00:00:01 |
| 5 | TABLE ACCESS BY INDEX ROWID| RESULTINPUT | 3 | 54 | 4 (0)| 00:00:01 |
|* 6 | INDEX RANGE SCAN | RSTI_ACTION | 3 | | 3 (0)| 00:00:01 |
|* 7 | INDEX UNIQUE SCAN | RSLT_ID | 1 | | 1 (0)| 00:00:01 |
|* 8 | TABLE ACCESS BY INDEX ROWID | RESULT | 1 | 311 | 2 (0)| 00:00:01 |
-----------------------------------------------------------------------------------------------
Predicate Information (identified by operation id):
---------------------------------------------------
2 - filter(TO_NUMBER(:2)<=TO_NUMBER(:3))
6 - access("T0"."RSTI_ACTION"=TO_NUMBER(:1))
7 - access("T0"."RSTI_RESULT"="T1"."RSLT_ID")
8 - filter("T1"."RSLT_PROPERTY"=TO_NUMBER(:4) AND "T1"."RSLT_STATUS">=TO_NUMBER(:2)
AND "T1"."RSLT_STATUS"<=TO_NUMBER(:3) AND "T1"."RSLT_COMPARATOR" IS NULL)
24 rows selected.

3
vdh/Edit.003 Normal file
View File

@@ -0,0 +1,3 @@
-- Return the Oracle major.minor version as an integer (e.g. 11.2 -> 112).
-- substr() isolates the "major.minor" part of the banner between the word
-- "Release" and the second dot, e.g. '11.2' out of
-- 'Oracle Database 11g ... Release 11.2.0.3.0 - 64bit Production'.
-- FIX: the original passed an empty string ('') as the to_number() format
-- model; a null/empty format model combined with an NLS parameter is invalid
-- (ORA-01481).  A real format model is required for the
-- NLS_NUMERIC_CHARACTERS override (decimal = '.') to apply.
select to_number(substr(banner,instr(banner,'Release')+8,instr(banner,'.',1,2)-8-instr(banner,'Release')),'999D999', 'NLS_NUMERIC_CHARACTERS = ''.,''')*10
from V$VERSION
where banner like 'Oracle%' and rownum < 2;

50
vdh/Edit.004 Normal file
View File

@@ -0,0 +1,50 @@
set pages 999
set linesize 200
column nbr_exec format 99G999G999
column ela_sec format 999G999G999
column dreads format 999G999G999
column bgets format 999G999G999
column avg_ela_sec format 9G999G999D99
column avg_dreads format 99G999G999
column avg_bgets format 99G999G999
-- Per-plan AWR execution statistics for one sql_id (&sql_id): totals of the
-- per-snapshot deltas plus the first/last snapshot interval in which each
-- plan_hash_value was recorded.
-- Changes vs the original: ANSI joins instead of comma joins, and nullif()
-- on the divisor so a plan with zero recorded executions reports NULL
-- averages instead of raising ORA-01476 (divisor is equal to zero).
with exec_stats as
( -- one row per execution plan: summed AWR deltas and snapshot range
  select
    sqlstat.sql_id,
    sqlstat.plan_hash_value phash_value,
    min(sqlstat.snap_id) min_snap_id,
    max(sqlstat.snap_id) max_snap_id,
    sum(sqlstat.executions_delta) nbr_exec,
    sum(sqlstat.elapsed_time_delta)/1000000 ela_sec,
    sum(sqlstat.disk_reads_delta) dreads,
    sum(sqlstat.buffer_gets_delta) bgets
  from
    dba_hist_sqlstat sqlstat
  where
    sql_id = '&sql_id'
  group by
    sql_id,
    plan_hash_value
)
select
  to_char(snap1.begin_interval_time, 'DD/MM/YYYY HH24:MI') earliest_occur,
  to_char(snap2.end_interval_time, 'DD/MM/YYYY HH24:MI') latest_occur,
  sql_id,
  phash_value,
  nbr_exec,
  ela_sec,
  dreads,
  bgets,
  (ela_sec/nullif(nbr_exec, 0)) avg_ela_sec,
  (dreads/nullif(nbr_exec, 0)) avg_dreads,
  (bgets/nullif(nbr_exec, 0)) avg_bgets
from
  exec_stats
  inner join dba_hist_snapshot snap1
    on exec_stats.min_snap_id = snap1.snap_id
  inner join dba_hist_snapshot snap2
    on exec_stats.max_snap_id = snap2.snap_id
;

16
vdh/Edit.005 Normal file
View File

@@ -0,0 +1,16 @@
-- Most recent archived log that is covered by a backup, for the current
-- database incarnation: one row with thread#, sequence#, next SCN and time.
select *
from ( select bad.thread#, bad.sequence#, bad.next_change#, to_char(bad.next_time, 'DD/MM/YYYY HH24:MI:SS') next_time
from v$backup_archivelog_details bad
-- highest backed-up sequence per redo thread
where (thread#, sequence#) in
( select bad2.thread#, max(bad2.sequence#) last_sequence#
from v$backup_archivelog_details bad2
group by bad2.thread#
)
-- restrict to the current incarnation so sequence numbers from before a
-- resetlogs are not picked up
and resetlogs_time =
( select resetlogs_time
from v$database_incarnation
where status = 'CURRENT'
)
order by bad.next_change#
)
-- NOTE(review): with multiple redo threads (RAC) only the single row with
-- the lowest next_change# survives rownum = 1 - confirm that is intended
where rownum = 1;

6
vdh/README.md Normal file
View File

@@ -0,0 +1,6 @@
toolbox
=======
Collection of SQL and shell scripts that I use as an Oracle DBA.
Some of these scripts were written by me; others come (as-is or modified) from other DBAs.

1952
vdh/RecreateUsers.sql Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,294 @@
#!/usr/bin/env python
"""Simple HTTP Server With Upload.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.1"
__all__ = ["SimpleHTTPRequestHandler"]
__author__ = "bones7456"
__home_page__ = "http://li2z.cn/"
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
import mimetypes
import re
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP request handler supporting GET, HEAD and POST (file upload).

    Files are served from the current working directory and its
    subdirectories; the MIME type of a file is determined by calling
    .guess_type().  POST accepts a file uploaded by the client.  GET,
    HEAD and POST are handled identically except that HEAD omits the
    actual contents of the file.
    """

    # Advertised in the Server: response header.
    server_version = "SimpleHTTPWithUpload/" + __version__
def do_GET(self):
    """Serve a GET request: send headers, then stream the body."""
    body = self.send_head()
    if body:
        self.copyfile(body, self.wfile)
        body.close()
def do_HEAD(self):
    """Serve a HEAD request: send the headers only, no body."""
    body = self.send_head()
    if body:
        body.close()
def do_POST(self):
"""Serve a POST request."""
r, info = self.deal_post_data()
print r, info, "by: ", self.client_address
f = StringIO()
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Upload Result Page</title>\n")
f.write("<body>\n<h2>Upload Result Page</h2>\n")
f.write("<hr>\n")
if r:
f.write("<strong>Success:</strong>")
else:
f.write("<strong>Failed:</strong>")
f.write(info)
f.write("<br><a href=\"%s\">back</a>" % self.headers['referer'])
f.write("<hr><small>Powerd By: bones7456, check new version at ")
f.write("<a href=\"http://li2z.cn/?s=SimpleHTTPServerWithUpload\">")
f.write("here</a>.</small></body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
if f:
self.copyfile(f, self.wfile)
f.close()
def deal_post_data(self):
    """Parse one multipart/form-data upload from the request body.

    Returns a (success, message) tuple.  Consumes exactly
    Content-Length bytes from rfile, writing the payload of the
    "file" part to disk in the directory named by the request path.
    """
    boundary = self.headers.plisttext.split("=")[1]
    remainbytes = int(self.headers['content-length'])
    line = self.rfile.readline()
    remainbytes -= len(line)
    if not boundary in line:
        return (False, "Content NOT begin with boundary")
    line = self.rfile.readline()
    remainbytes -= len(line)
    fn = re.findall(r'Content-Disposition.*name="file"; filename="(.*)"', line)
    if not fn:
        return (False, "Can't find out file name...")
    path = self.translate_path(self.path)
    fn = os.path.join(path, fn[0])
    # skip the Content-Type header line and the blank separator line
    line = self.rfile.readline()
    remainbytes -= len(line)
    line = self.rfile.readline()
    remainbytes -= len(line)
    try:
        out = open(fn, 'wb')
    except IOError:
        return (False, "Can't create file to write, do you have permission to write?")
    # Write one line behind: the line just before the closing boundary
    # carries a trailing CRLF that belongs to the protocol, not the file,
    # so it is stripped before the final write.
    preline = self.rfile.readline()
    remainbytes -= len(preline)
    while remainbytes > 0:
        line = self.rfile.readline()
        remainbytes -= len(line)
        if boundary in line:
            preline = preline[0:-1]
            if preline.endswith('\r'):
                preline = preline[0:-1]
            out.write(preline)
            out.close()
            return (True, "File '%s' upload success!" % fn)
        else:
            out.write(preline)
            preline = line
    return (False, "Unexpect Ends of data.")
def send_head(self):
    """Shared GET/HEAD logic: send the status line and MIME headers.

    Returns an open file object positioned at the body to send (the
    caller must copy and/or close it; HEAD just closes it unread), or
    None when the response (redirect, directory listing, error) was
    fully handled here.
    """
    path = self.translate_path(self.path)
    f = None
    if os.path.isdir(path):
        if not self.path.endswith('/'):
            # redirect browser - doing basically what apache does
            self.send_response(301)
            self.send_header("Location", self.path + "/")
            self.end_headers()
            return None
        for index in "index.html", "index.htm":
            index = os.path.join(path, index)
            if os.path.exists(index):
                path = index
                break
        else:
            # no index file present: render a listing instead
            return self.list_directory(path)
    ctype = self.guess_type(path)
    try:
        # Always read in binary mode. Opening files in text mode may cause
        # newline translations, making the actual size of the content
        # transmitted *less* than the content-length!
        f = open(path, 'rb')
    except IOError:
        self.send_error(404, "File not found")
        return None
    self.send_response(200)
    self.send_header("Content-type", ctype)
    fs = os.fstat(f.fileno())
    self.send_header("Content-Length", str(fs[6]))
    self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
    self.end_headers()
    return f
def list_directory(self, path):
    """Produce a directory listing page (used when index.html is absent).

    Returns a file object holding the rendered HTML with the response
    headers already sent, or None on error (error response already
    sent) - the same contract as send_head().
    """
    try:
        entries = os.listdir(path)
    except os.error:
        self.send_error(404, "No permission to list directory")
        return None
    entries.sort(key=lambda a: a.lower())
    f = StringIO()
    displaypath = cgi.escape(urllib.unquote(self.path))
    f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
    f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
    f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
    f.write("<hr>\n")
    f.write("<form ENCTYPE=\"multipart/form-data\" method=\"post\">")
    f.write("<input name=\"file\" type=\"file\"/>")
    f.write("<input type=\"submit\" value=\"upload\"/></form>\n")
    f.write("<hr>\n<ul>\n")
    for name in entries:
        fullname = os.path.join(path, name)
        displayname = linkname = name
        # Append / for directories or @ for symbolic links
        if os.path.isdir(fullname):
            displayname = name + "/"
            linkname = name + "/"
        if os.path.islink(fullname):
            displayname = name + "@"
        # Note: a link to a directory displays with @ and links with /
        f.write('<li><a href="%s">%s</a>\n'
                % (urllib.quote(linkname), cgi.escape(displayname)))
    f.write("</ul>\n<hr>\n</body>\n</html>\n")
    length = f.tell()
    f.seek(0)
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.send_header("Content-Length", str(length))
    self.end_headers()
    return f
def translate_path(self, path):
    """Map a URL path onto a filename under the current directory.

    The query string and fragment are dropped, and path components
    that mean special things to the local file system (drive letters,
    '.' and '..') are ignored rather than honoured.
    """
    # abandon query parameters
    path = path.split('?',1)[0]
    path = path.split('#',1)[0]
    path = posixpath.normpath(urllib.unquote(path))
    components = path.split('/')
    components = filter(None, components)
    result = os.getcwd()
    for word in components:
        drive, word = os.path.splitdrive(word)
        head, word = os.path.split(word)
        if word in (os.curdir, os.pardir):
            continue
        result = os.path.join(result, word)
    return result
def copyfile(self, source, outputfile):
    """Stream everything from source to outputfile.

    source needs only a read() method and outputfile only a write()
    method; the block size is whatever shutil.copyfileobj chooses.
    Override this to change the block size or newline handling.
    """
    shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
    """Map a filename to a MIME type string of the form type/subtype.

    The extension is looked up in self.extensions_map, first verbatim
    and then lower-cased; unknown extensions fall back to the '' entry
    (application/octet-stream by default).
    """
    base, ext = posixpath.splitext(path)
    if ext in self.extensions_map:
        return self.extensions_map[ext]
    ext = ext.lower()
    if ext in self.extensions_map:
        return self.extensions_map[ext]
    else:
        return self.extensions_map['']
if not mimetypes.inited:
    mimetypes.init()  # pull in the system mime.types tables
# Extension -> MIME type lookup used by guess_type(); starts from the
# system table and pins a few plain-text overrides plus a catch-all.
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
    '': 'application/octet-stream',  # Default
    '.py': 'text/plain',
    '.c': 'text/plain',
    '.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = BaseHTTPServer.HTTPServer):
    """Run the upload-capable handler under BaseHTTPServer's test driver."""
    BaseHTTPServer.test(HandlerClass, ServerClass)


if __name__ == '__main__':
    test()

213
vdh/activity_monitor.sql Normal file
View File

@@ -0,0 +1,213 @@
-- activity_monitor.sql: DBA overview report of FRA usage, tablespace usage
-- and recent archivelog generation, in that order.
-- SQL*Plus formatting and report totals for the FRA usage query below.
set echo off
set feedback off
set linesize 150
set pages 9999
column space_limit_mb format 999G999D99
column space_used_mb format 999G999D99
column space_reclaimable_mb format 999G999D99
column percent_space_used format 00D99
column percent_space_reclaimable format 00D99
compute sum of space_used_mb on report
compute sum of space_reclaimable_mb on report
compute sum of percent_space_used on report
compute sum of percent_space_reclaimable on report
compute sum of number_of_files on report;
break on space_limit_mb on report
-- Fast Recovery Area usage broken down by file type (essentially a
-- hand-rolled v$recovery_area_usage with sizes in MB).  The decode/nvl2
-- wrappers zero everything out when no FRA is configured (ra.name is null
-- or space_limit is 0).
-- NOTE(review): x$krasga.ceilasm appears to flag that FRA files stored in
-- ASM ('+...' names) must be rounded up to whole MB allocation units, and
-- x$kccagf rectype rows mark files that are reclaimable - both are
-- undocumented fixed views; confirm against the Oracle version in use.
select fusg.file_type, decode(nvl2(ra.name, ra.space_limit, 0), 0, 0, nvl(ra.space_limit, 0))/1048576 space_limit_mb,
decode(nvl2(ra.name, ra.space_limit, 0), 0, 0, nvl(fusg.space_used, 0))/1048576 space_used_mb,
decode(nvl2(ra.name, ra.space_limit, 0), 0, 0, round(nvl(fusg.space_used, 0)/ra.space_limit, 4) * 100) percent_space_used,
decode(nvl2(ra.name, ra.space_limit, 0), 0, 0, nvl(fusg.space_reclaimable, 0))/1048576 space_reclaimable_mb,
decode(nvl2(ra.name, ra.space_limit, 0), 0, 0, round(nvl(fusg.space_reclaimable, 0)/ra.space_limit, 4) * 100) percent_space_reclaimable,
nvl2(ra.name, fusg.number_of_files, 0) number_of_files
from v$recovery_file_dest ra,
-- controlfiles kept in the FRA (never reclaimable)
( select 'CONTROLFILE' file_type,
sum( case when ceilasm = 1 and name like '+%'
then ceil(((block_size*file_size_blks)+1)/1048576)*1048576
else block_size*file_size_blks
end
) space_used,
0 space_reclaimable, count(*) number_of_files
from v$controlfile,
( select /*+ no_merge*/ ceilasm
from x$krasga
)
where is_recovery_dest_file = 'YES'
union all
-- online and standby redo log members in the FRA (never reclaimable)
select 'ONLINELOG' file_type,
sum( case when ceilasm = 1 and member like '+%'
then ceil((l.bytes+1)/1048576)*1048576
else l.bytes
end
) space_used,
0 space_reclaimable, count(*) number_of_files
from ( select group#, bytes
from v$log
union
select group#, bytes
from v$standby_log
) l, v$logfile lf,
( select /*+ no_merge */ ceilasm
from x$krasga
)
where l.group# = lf.group#
and lf.is_recovery_dest_file = 'YES'
union all
-- archived logs in the FRA; rectype 11 rows are reclaimable
select 'ARCHIVELOG' file_type,
sum(al.file_size) space_used,
sum( case when dl.rectype = 11
then al.file_size
else 0
end
) space_reclaimable,
count(*) number_of_files
from ( select recid,
case when ceilasm = 1 and name like '+%'
then ceil(((blocks*block_size)+1)/1048576)*1048576
else blocks * block_size
end file_size
from v$archived_log,
( select /*+ no_merge */ ceilasm
from x$krasga
)
where is_recovery_dest_file = 'YES'
and name is not null
) al,
x$kccagf dl
where al.recid = dl.recid(+)
and dl.rectype(+) = 11
union all
-- RMAN backup pieces in the FRA; rectype 13 rows are reclaimable
select 'BACKUPPIECE' file_type,
sum(bp.file_size) space_used,
sum ( case when dl.rectype = 13
then bp.file_size
else 0
end
) space_reclaimable,
count(*) number_of_files
from ( select recid,
case when ceilasm = 1 and handle like '+%'
then ceil((bytes+1)/1048576)*1048576
else bytes
end file_size
from v$backup_piece,
( select /*+ no_merge */ ceilasm
from x$krasga
)
where is_recovery_dest_file = 'YES'
and handle is not null
) bp,
x$kccagf dl
where bp.recid = dl.recid(+)
and dl.rectype(+) = 13
union all
-- datafile image copies in the FRA; rectype 16 rows are reclaimable
select 'IMAGECOPY' file_type,
sum(dc.file_size) space_used,
sum( case when dl.rectype = 16
then dc.file_size
else 0 end
) space_reclaimable,
count(*) number_of_files
from ( select recid,
case when ceilasm = 1 and name like '+%'
then ceil(((blocks*block_size)+1)/1048576)*1048576
else blocks * block_size
end file_size
from v$datafile_copy,
( select /*+ no_merge */ ceilasm
from x$krasga
)
where is_recovery_dest_file = 'YES'
and name is not null
) dc,
x$kccagf dl
where dc.recid = dl.recid(+)
and dl.rectype(+) = 16
union all
-- flashback logs; reclaimable size comes from x$krfblog
select 'FLASHBACKLOG' file_type,
nvl(fl.space_used, 0) space_used,
nvl(fb.reclsiz, 0) space_reclaimable,
nvl(fl.number_of_files, 0) number_of_files
from ( select sum( case when ceilasm = 1 and name like '+%'
then ceil((fl.bytes+1)/1048576)*1048576
else bytes
end
) space_used,
count(*) number_of_files
from v$flashback_database_logfile fl,
( select /*+ no_merge */ ceilasm
from x$krasga
)
) fl,
( select sum(to_number(fblogreclsiz)) reclsiz
from x$krfblog
) fb
) fusg
order by file_type;
clear breaks
-- Tablespace usage: current vs autoextend-maximum datafile sizes, free
-- space and percentages, with report totals per column.
set pagesize 999
set linesize 140
column name format a25 heading "tablespace name"
column space_mb format 99g999g990D99 heading "curr df mbytes"
column maxspace_mb format 99g999g990D99 heading "max df mbytes"
column used format 99g999g990D99 heading "used mbytes"
column df_free format 99g999g990D99 heading "curr df free mbytes"
column maxdf_free format 99g999g990D99 heading "max df free mbytes"
column pct_free format 990D99 heading "% free"
column pct_maxfile_free format 990D99 heading "% maxfile free"
break on report
compute sum of space_mb on report
compute sum of maxspace_mb on report
compute sum of df_free on report
compute sum of maxdf_free on report
compute sum of used on report
-- greatest(maxbytes, bytes) covers files that are not autoextensible
-- (maxbytes = 0) or that grew past their declared maxsize.
-- NOTE(review): dba_free_space has no rows for temp tablespaces, so temp
-- shows 0 free here (the outer join + nvl keep the row) - confirm intended.
select df.tablespace_name name, df.space space_mb, df.maxspace maxspace_mb, (df.space - nvl(fs.freespace,0)) used,
nvl(fs.freespace,0) df_free, (nvl(fs.freespace,0) + df.maxspace - df.space) maxdf_free,
100 * (nvl(fs.freespace,0) / df.space) pct_free,
100 * ((nvl(fs.freespace,0) + df.maxspace - df.space) / df.maxspace) pct_maxfile_free
from ( select tablespace_name, sum(bytes)/1024/1024 space, sum(greatest(maxbytes,bytes))/1024/1024 maxspace
from dba_data_files
group by tablespace_name
union all
select tablespace_name, sum(bytes)/1024/1024 space, sum(greatest(maxbytes,bytes))/1024/1024 maxspace
from dba_temp_files
group by tablespace_name
) df,
( select tablespace_name, sum(bytes)/1024/1024 freespace
from dba_free_space
group by tablespace_name
) fs
where df.tablespace_name = fs.tablespace_name(+)
order by name;
clear breaks
-- Archivelog generation per hour over the last ~5 hours (local archiving
-- only, standby destinations excluded), with a crude '*'-per-log bar graph.
column hour_arch_size format 99G999D99
column hour_arch# format 9G999
column graph format a15
column dayname format a12
column dayhour format a18
-- NOTE(review): "break on dayname" references a column the query below does
-- not select, so this break never fires - confirm whether a dayname column
-- was meant to be added to the select list.
break on dayname skip 1
select to_char(trunc(completion_time, 'HH'), 'DD/MM/YYYY HH24:MI') dayhour, sum(blocks * block_size)/1024/1024 hour_arch_size, count(*) hour_arch#,
rpad('*',floor(count(*)),'*') graph
from v$archived_log
where standby_dest = 'NO'
and completion_time >= trunc(sysdate, 'HH') - 4/24
group by trunc(completion_time, 'HH')
order by dayhour;
clear breaks

135
vdh/add_exadata_vmdisk.sh Normal file
View File

@@ -0,0 +1,135 @@
#!/bin/bash
### This script will create a new virtual disk using the passed name and size and attaches it on the passed vm guest.
### It also modifies the config file for the guest, so the disk will be attached again after a restart
### The script will do some checking after each step to verify a correct outcome, but will not check upfront if enough disk space is available (still to be added)
###
### This script needs to be executed as root on the dom0 of the server on which the virtual disk needs to be created
### Required parameters are <guest name>, <disk name> and <disk size>
### Do NOT add the img suffix to the disk name
### The disk size needs to be suffixed with the unit (eg 20G, to create a disk of 20 GB)
###
### Important to know is that this script will probably fail if the disk device mappings has "gaps", as it simply counts the number of disk devices for the given vm
### to determine the next free front device

### Get the input parameters
guest_name=$1
disk_name=$2
disk_size=$3

### Check if all mandatory input parameters are given
if [ "${guest_name}" == "" ] || [ "${disk_name}" == "" ] || [ "${disk_size}" == "" ]
then
    echo "usage: $0 guest_name disk_name disk_size"
    echo "guest_name is the name of the virtual guest on which the disk needs to be attached"
    echo "disk_name is the name of the disk to be created (without the .img suffix)"
    echo "disk_size is the size of the disk, including the unit (eg 20G)"
    exit -1
fi

### generate the array of disk devices (front-end names as seen by the guest)
### the current limit of attached devices is 37
### fixed: the original list skipped xvdu and listed xvdae twice, so every
### slot number >= 20 mapped to the wrong (or an already used) device name
drive_array=(xvda xvdb xvdc xvdd xvde xvdf xvdg xvdh xvdi xvdj xvdk xvdl xvdm xvdn xvdo xvdp xvdq xvdr xvds xvdt xvdu xvdv xvdw xvdx xvdy xvdz xvdaa xvdab xvdac xvdad xvdae xvdaf xvdag xvdah xvdai xvdaj xvdak)

### get the uuid of the guest on which the new disk needs to be added
guest_uuid=$(xl vm-list | grep ${guest_name}| tr -d '-' |cut -d' ' -f1)
if [ "${guest_uuid}" == "" ]
then
    echo "FAIL - Could not get guest uuid, please check guest name"
    exit -1
else
    echo "Guest UUID: ${guest_uuid}"
fi

### get the number of current attached block devices, as the output also includes a header, this is already the next available slot number
### subtract 1 to compensate for the arrays, which start at 0 and not 1
### also, using xm and not xl here as xl seems to not list the disks that are attached on a running instance (exact reason still to be verified)
next_slot=$(($(xm block-list ${guest_name} | wc -l)-1))
if [ "${next_slot}" == "" ]
then
    echo "FAIL - Could not determine a free slot number, check outcome of xm block list ${guest_name}"
    exit -1
else
    echo "Next drive slot: ${next_slot}"
fi

### convert the new slot number to a drive name
next_drive=${drive_array[${next_slot}]}
if [ "${next_drive}" == "" ] || [[ ${next_drive} != xvd* ]]
then
    echo "FAIL - Could not convert ${next_slot} to drive, check the drive_array variable"
    exit -1
else
    echo "The new disk will be known on the vm as /dev/${next_drive}"
fi

### generate a new uuid to be used for the new disk
disk_uuid=$(uuidgen | tr -d '-')
if [ "${disk_uuid}" == "" ]
then
    echo "FAIL - Could not generate a new disk_uuid, check path variable for uuidgen"
    exit -1
else
    echo "Generated disk UUID: ${disk_uuid}"
fi

### create the virtual disk based upon the input parameters
### check first if the disk not already exists
if [ -e "/EXAVMIMAGES/GuestImages/${guest_name}/${disk_name}.img" ]
then
    echo "FAIL - File /EXAVMIMAGES/GuestImages/${guest_name}/${disk_name}.img already exists, use a different disk name"
    exit -1
fi

### create the disk
echo "creating the disk now"
qemu-img create "/EXAVMIMAGES/GuestImages/${guest_name}/${disk_name}.img" "${disk_size}"

### check if the disk exists
if [ ! -e "/EXAVMIMAGES/GuestImages/${guest_name}/${disk_name}.img" ]
then
    echo "FAIL - File /EXAVMIMAGES/GuestImages/${guest_name}/${disk_name}.img was not created, check free disk space"
    exit -1
else
    echo "Created disk image /EXAVMIMAGES/GuestImages/${guest_name}/${disk_name}.img"
fi

### create the symbolic link, using the uuid of the vm guest and the newly generated uuid
ln -s "/EXAVMIMAGES/GuestImages/${guest_name}/${disk_name}.img" "/OVS/Repositories/${guest_uuid}/VirtualDisks/${disk_uuid}.img"
### check if the symbolic link was correctly created
if [ ! -h "/OVS/Repositories/${guest_uuid}/VirtualDisks/${disk_uuid}.img" ]
then
    echo "FAIL - Could not create symbolic link /OVS/Repositories/${guest_uuid}/VirtualDisks/${disk_uuid}.img"
    exit -1
else
    echo "Symbolic link /OVS/Repositories/${guest_uuid}/VirtualDisks/${disk_uuid}.img generated"
fi

### attach the block device to the running guest
xm block-attach ${guest_name} file:/OVS/Repositories/${guest_uuid}/VirtualDisks/${disk_uuid}.img /dev/${next_drive} w
result_code=$?
if [ ${result_code} -ne 0 ]
then
    echo "FAIL - An error occurred during the attach of the virtual disk, check console output"
    exit -1
else
    echo "New disk was successfully attached to VM"
fi

### add the new disk to the vm config file, so it is attached when restarted
### The sed commands searches for a line with the format "disk = [<disk strings>]" and inserts the new disk into it.
### It does this by using grouping and back references (eg \1)
### The first group is "disk = [", the second contains the existing disk strings and the third "]"
### The new string is then inserted between the second and third back reference
### (a timestamped backup of vm.cfg is kept via sed -i.<timestamp>)
sed -i.$(date '+%Y%m%d%H%M%S') "s/\(^disk = \[\)\(.*\)\(\]\)/\1\2,\'file:\/OVS\/Repositories\/${guest_uuid}\/VirtualDisks\/${disk_uuid}.img,${next_drive},w\'\3/" /EXAVMIMAGES/GuestImages/${guest_name}/vm.cfg
result_code=$?
if [ ${result_code} -ne 0 ]
then
    echo "FAIL - An error occurred during the modification of the vm config file, check console output"
    exit -1
else
    echo "Disk added to config file /EXAVMIMAGES/GuestImages/${guest_name}/vm.cfg"
fi
exit 0

View File

@@ -0,0 +1,12 @@
-- Schedule a daily DBMS_JOB (first run today at 06:00) that gathers optimizer
-- statistics for the ADM schema.
-- NOTE(review): DBMS_JOB is deprecated in favour of DBMS_SCHEDULER on 12.2+;
-- kept here for compatibility with the original script.
DECLARE
l_job number;  -- job id assigned by dbms_job.submit (OUT)
BEGIN
dbms_job.submit( job => l_job,
what => 'BEGIN dbms_stats.gather_schema_stats(ownname => ''ADM'', method_opt => ''FOR ALL INDEXED COLUMNS SIZE 1'', cascade => true); END;',
next_date => trunc(sysdate) + 6/24,
-- evaluated when the job runs (~06:00): midnight + 30h = next day 06:00
interval => 'trunc(sysdate) + 30/24'
);
-- DBMS_JOB.SUBMIT is transactional: without a COMMIT the job is never made
-- visible to the job queue coordinator and is lost when the session ends.
commit;
END;

4
vdh/add_lun_online.sh Normal file
View File

@@ -0,0 +1,4 @@
#!/bin/bash
### Online-scan a new LUN: for every FC transport endpoint (host:channel:target)
### found under /sys/class/fc_transport, issue a targeted scan for the given
### LUN id so the device is discovered without a reboot.
### usage: add_lun_online.sh <lun_id>
### still have to add the code to check the script is executed as root
if [ "$1" == "" ]
then
    echo "usage: $0 lun_id"
    exit 1
fi
### the tr set is quoted so the shell cannot glob-expand [[:alpha:]]; tr then
### strips the alphabetic part of e.g. "target0:0:0", leaving host:chan:target
ls -1 /sys/class/fc_transport/ | tr -d '[[:alpha:]]' | awk -v lun_id="$1" 'BEGIN {FS=":"} {system("echo \""$2 " " $3 " " lun_id "\" > /sys/class/scsi_host/host"$1"/scan")}'

48
vdh/adrci_cleanup.sh Normal file
View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
### sets the short and long retention policy for all oracle homes
### and issues a purge (smon automatically runs a purge of rdbms homes once every 7 days,
### but see the bug note below)
###
### run as Oracle owner, expects no role separation between Oracle and GI
### advice to schedule this once a week
###
### usage: adrci_cleanup.sh [ORACLE_HOME] [adrci base] [short policy hours] [long policy hours]
export ORACLE_HOME=${1:-/u01/app/oracle/product/18.0.0.0/dbhome_1}
ADRCI=${ORACLE_HOME}/bin/adrci
adrci_base=${2:-/u01/app/oracle}
# short: script default 7 days (168h), used for TRACE, CDUMP, UTSCDMP, IPS
shortp_policy=${3:-168}
# long: script default 31 days (744h), used for ALERT, INCIDENT, SWEEP, STAGE, HM
longp_policy=${4:-744}

### loop through the adrci homes and set the short and long policy
echo
echo set policies
echo using ${adrci_base} as adrci_base
echo
for home in $(${ADRCI} exec="set base ${adrci_base}; show homes" | grep -E -e "/rdbms/|/tnslsnr/|/asm/")
do
    echo set policy for ${home}
    ${ADRCI} exec="set homepath ${home}; set control \(SHORTP_POLICY = ${shortp_policy}, LONGP_POLICY = ${longp_policy}\)"
done

### loop through the homes and issue the purge
### originally only non-rdbms homes (smon purges db homes every 7 days), but
### ---> changed to do rdbms homes as well due to bug 29021413 (see MOS Note Doc ID 2572977.1 - ADR Files are not purged automatically)
echo
echo purging homes
echo using ${adrci_base} as adrci_base
echo
start=$SECONDS
for home in $(${ADRCI} exec="set base ${adrci_base}; show homes" | grep -E -e "/rdbms/|/tnslsnr/|/asm/")
do
    echo Purging home ${home}
    homestart=$SECONDS
    ${ADRCI} exec="set homepath ${home}; purge"
    echo Duration: $(date -u -d "0 $SECONDS sec - ${homestart} sec" +"%H:%M:%S")
    echo
done
echo Total duration: $(date -u -d "0 $SECONDS sec - ${start} sec" +"%H:%M:%S")

View File

@@ -0,0 +1,64 @@
-- Error-logging infrastructure: a sequence, a log table and an AFTER
-- SERVERERROR database trigger that records every server error together with
-- the SQL statement that raised it.
-- NOTE: the DROP statements fail harmlessly on a first install (object does
-- not exist yet).
drop sequence log_errors_seq;
create sequence log_errors_seq
start with 1
increment by 1
minvalue 1
nomaxvalue
nocache
nocycle;
drop table log_errors_tab;
-- One row per error in the error stack; rows of the same stack share an id.
create table log_errors_tab
( id number,
log_date date,
err_msg clob,
stm clob,
username varchar2(30),
err_depth number
)
tablespace sysaux;
-- Fires once per server error in any session; writes the full error stack.
create or replace trigger log_errors_trig after servererror on database
DECLARE
id number;
-- v_user varchar2(30);
-- v_os varchar2(30);
-- v_prog varchar2(30);
-- v_cur varchar2(100);
-- v_sql varchar2(2000);
-- NOTE(review): stmt is varchar2(2000); reassembling a longer statement
-- overflows and the WHEN OTHERS below silently drops that log entry - confirm
-- whether that is acceptable.
stmt varchar2 (2000);
sql_text ora_name_list_t;  -- the failing statement, in 64-byte pieces
l binary_integer ;
BEGIN
select log_errors_seq.nextval
into id
from dual;
-- reassemble the failing SQL statement from its pieces
l := ora_sql_txt(sql_text);
for i in 1..l loop
stmt :=stmt||sql_text(i);
end loop;
-- one row per level of the server error stack
for n in 1..ora_server_error_depth loop
insert into log_errors_tab (id, log_date, err_msg, stm, username, err_depth)
values (id, sysdate, ora_server_error_msg(n),stmt, ora_login_user, n);
end loop;
EXCEPTION
-- not pretty, but....
-- avoid blocking programs because of malfunctioning error logging
when others then
null;
END LOG_ERRORS_TRIG;
/

10
vdh/all_db_links.sql Normal file
View File

@@ -0,0 +1,10 @@
-- List every database link in the database, including the stored connection
-- credentials, straight from the dictionary base tables (requires SYS access).
column owner format a15
column username format a15
column password format a15
column host format a20
column db_link format a30
select u.name     owner,
       l.name     db_link,
       l.userid   username,
       l.password password,
       l.host     host
from sys.link$ l
     join sys.user$ u
       on l.owner# = u.user#
order by l.name;

42
vdh/all_granted_roles.sql Normal file
View File

@@ -0,0 +1,42 @@
-- show all roles for a user, either directly assigned or nested via other roles
-- Implementation: sys.sysauth$ holds grantee->privilege (role) edges; the
-- CONNECT BY walks those edges transitively, so roles granted through other
-- roles are expanded. Only normal users (user$.type# = 1) and PUBLIC are
-- considered as starting grantees. Requires SYS access.
with user_role_hierarchy
as ( select
t2.name username, t1.granted_role
from
( select
distinct sa.userid, u.name granted_role
from
-- transitive closure of the grant graph: connect_by_root keeps the
-- original grantee while walking down the role chain
( select
t.*, connect_by_root grantee# userid
from
sys.sysauth$ t
connect by
prior privilege# = grantee#
) sa,
sys.user$ u
where
u.user# = sa.privilege#
and sa.userid in
( select
user#
from
sys.user$
where
type# = 1 -- normal users
or user# = 1 -- PUBLIC
)
) t1,
sys.user$ t2
where
t1.userid = t2.user#
)
select
*
from
user_role_hierarchy
where
username = '&user'
order by
granted_role
;

View File

@@ -0,0 +1,40 @@
-- Size the archived-redo destination for a given on-disk retention:
-- per-day generated MB (last month, excluding today) plus rolling totals over
-- the retention window - the actual rolling sum, and pessimistic/optimistic
-- estimates (busiest/quietest day times the number of days).
set pagesize 9999
set linesize 120
set verify off
column mb_arch format 9G999G999D99
column real_mb_in_period format 9G999G999D99
column max_mb_in_period format 9G999G999D99
column min_mb_in_period format 9G999G999D99
column counted format 99G999D99
column counted_in_period format 99G999D99
prompt Enter the nbr of days the archived redo logs should be kept on disk
accept days_on_disk prompt '# Days: '
-- RANGE &days_on_disk PRECEDING over an ORDER BY on a DATE means: the current
-- day plus the &days_on_disk calendar days before it.
select dag, mb_arch,
sum(mb_arch) over
( order by dag
range &days_on_disk preceding
) as real_mb_in_period,
counted,
sum(counted) over
( order by dag
range &days_on_disk preceding
) counted_in_period,
max(mb_arch) over
( order by dag
range &days_on_disk preceding
) * &days_on_disk as max_mb_in_period,
min(mb_arch) over
( order by dag
range &days_on_disk preceding
) * &days_on_disk as min_mb_in_period
-- dag = day; per-day archived MB and log count from v$archived_log
from ( select trunc(completion_time) dag, sum(blocks * block_size)/1024/1024 mb_arch,
count(*) counted
from v$archived_log
where months_between(trunc(sysdate), trunc(completion_time)) <= 1
and completion_time < trunc(sysdate)
group by trunc(completion_time)
);

View File

@@ -0,0 +1,64 @@
-- Archived redo generation report, primary-generated logs only
-- (standby_dest = 'NO'): monthly average, per-day and per-hour breakdowns.
set echo off
set pagesize 999
set linesize 150
/*
Average redo generation per day over the last month (excluding today)
*/
column start_day format a22
column end_day format a22
column days_between format 99
column avg_archived_per_day format a13 heading avg_gen
select to_char(min(dag), 'DD/MM/YYYY HH24:MI:SS') start_day, to_char(max(dag) + 1 - 1/(24*60*60), 'DD/MM/YYYY HH24:MI:SS') end_day,
(max(dag) - min(dag) + 1) days_between,
to_char(avg(gen_archived_size),'9G999G999D99') avg_archived_per_day
from ( select trunc(completion_time) dag, sum(blocks * block_size)/1024/1024 gen_archived_size
from v$archived_log
where standby_dest = 'NO'
and months_between(trunc(sysdate), trunc(completion_time)) <= 1
and completion_time < trunc(sysdate)
group by trunc(completion_time)
);
/*
archived redo over the (max) last 10 days - graph: one '*' per 10 logs
*/
column day_arch_size format 9G999G999D99
column day_arch# format 999G999
column graph format a15
column dayname format a12
column day format a12
select to_char(day, 'DD/MM/YYYY') day, to_char(day,'DAY') dayname, day_arch_size, day_arch#, graph
from ( select trunc(completion_time) day, sum(blocks * block_size)/1024/1024 day_arch_size, count(*) day_arch#,
rpad('*',floor(count(*)/10),'*') graph
from v$archived_log
where standby_dest = 'NO'
and completion_time >= trunc(sysdate) - 10
group by trunc(completion_time)
order by day
);
/*
archived redo per hour over the (max) last 2 days - graph: one '*' per 4 logs
*/
column hour_arch_size format 99G999D99
column hour_arch# format 9G999
column graph format a15
column dayname format a12
column dayhour format a18
break on dayname skip 1
select to_char(dayhour,'DAY') dayname, to_char(dayhour, 'DD/MM/YYYY HH24:MI') dayhour, hour_arch_size, hour_arch#, graph
from ( select trunc(completion_time, 'HH') dayhour, sum(blocks * block_size)/1024/1024 hour_arch_size, count(*) hour_arch#,
rpad('*',floor(count(*)/4),'*') graph
from v$archived_log
where standby_dest = 'NO'
and completion_time >= trunc(sysdate) - 2
group by trunc(completion_time, 'HH')
order by dayhour
);
clear breaks;

19
vdh/as.sql Normal file
View File

@@ -0,0 +1,19 @@
-- Show active foreground sessions with the SQL they are currently executing:
-- cursor identifiers, plan hash, executions and average elapsed seconds per
-- execution (elapsed_time is in microseconds, hence /1000000).
set pagesize 999
set lines 150
col username format a13
col prog format a10 trunc
col sql_text format a41 trunc
col sid format 9999
col child for 99999
col avg_etime for 999,999.99
select sid, substr(program,1,19) prog, address, hash_value, b.sql_id, child_number child, plan_hash_value, executions execs,
(elapsed_time/decode(nvl(executions,0),0,1,executions))/1000000 avg_etime,
sql_text
from v$session a, v$sql b
where status = 'ACTIVE'
and username is not null
and a.sql_id = b.sql_id
and a.sql_child_number = b.child_number
and sql_text not like 'select sid, substr(program,1,19) prog, address, hash_value, b.sql_id, child_number child,%' -- don't show this query
order by sql_id, sql_child_number
/

101
vdh/ash_queries_wip.sql Normal file
View File

@@ -0,0 +1,101 @@
-- ASH work-in-progress queries: time-model deltas for one client program,
-- taken from gv$active_session_history for fixed investigation windows.
-- Query 1: totals for the program over a time window, plus the first and
-- last sample actually seen.
set linesize 200
set pages 50000
column tm_delta_time format 99G999G999G999 heading "tm time|delta (µs)"
column tm_delta_db_time format 99G999G999G999 heading "tm db|delta (µs)"
column tm_delta_cpu_time format 99G999G999G999 heading "tm cpu|delta (µs)"
column time_waited format 99G999G999G999 heading "waited|(µs)"
column delta_time format 99G999G999G999 heading "delta time|(µs)"
column first_time format a15 heading "first sample"
column last_time format a15 heading "last sample"
select
sum(ash.tm_delta_time) tm_delta_time,
sum(ash.tm_delta_db_time) tm_delta_db_time,
sum(ash.tm_delta_cpu_time) tm_delta_cpu_time,
sum(ash.time_waited) time_waited,
sum(ash.delta_time) delta_time,
to_char(min(ash.sample_time), 'DD/MM HH24:MI:SS') first_time,
to_char(max(ash.sample_time), 'DD/MM HH24:MI:SS') last_time
from
gv$active_session_history ash
where
ash.program = 'batch.exe'
and ash.sample_time between
to_date('22/11/2012 10:38', 'DD/MM/YYYY HH24:MI')
and to_date('22/11/2012 18:20', 'DD/MM/YYYY HH24:MI')
;
-- Query 2: the same totals broken down per sql_id, heaviest DB time first.
set linesize 200
set pages 50000
column sql_id format a15 heading "sql id"
column tm_delta_time format 99G999G999G999 heading "tm time|delta (µs)"
column tm_delta_db_time format 99G999G999G999 heading "tm db|delta (µs)"
column tm_delta_cpu_time format 99G999G999G999 heading "tm cpu|delta (µs)"
column time_waited format 99G999G999G999 heading "waited|(µs)"
column delta_time format 99G999G999G999 heading "delta time|(µs)"
select
sql_id,
sum(ash.tm_delta_time) tm_delta_time,
sum(ash.tm_delta_db_time) tm_delta_db_time,
sum(ash.tm_delta_cpu_time) tm_delta_cpu_time,
sum(ash.time_waited) time_waited,
sum(ash.delta_time) delta_time
from
gv$active_session_history ash
where
ash.program = 'batch.exe'
and ash.sample_time between
to_date('22/11/2012 18:20', 'DD/MM/YYYY HH24:MI')
and to_date('23/11/2012 02:16', 'DD/MM/YYYY HH24:MI')
group by
sql_id
order by
tm_delta_db_time desc
;
-- Query 3: raw sample-level detail for one sql_id, per session.
set linesize 200
set pages 50000
column sample_id format 99999999 heading "sample"
column sample_time format a30 heading "sample time"
column session_id format 9999999 heading "session"
column session_serial# format 9999999 heading "serial#"
column sql_id format a15 heading "sql id"
column tm_delta_time format 99G999G999G999 heading "tm time|delta (µs)"
column tm_delta_db_time format 99G999G999G999 heading "tm db|delta (µs)"
column tm_delta_cpu_time format 99G999G999G999 heading "tm cpu|delta (µs)"
column time_waited format 99G999G999G999 heading "waited|(µs)"
column delta_time format 99G999G999G999 heading "delta time|(µs)"
select
ash.sample_id,
ash.sample_time,
ash.session_id,
ash.session_serial#,
ash.sql_id,
ash.tm_delta_time,
ash.tm_delta_db_time,
ash.tm_delta_cpu_time,
ash.time_waited,
ash.delta_time
from
gv$active_session_history ash
where
ash.program = 'batch.exe'
and ash.sample_time > to_date('21/11/2012 22:00', 'DD/MM/YYYY HH24:MI')
and ash.sql_id = 'c6v8hz7wg8mym'
order by
ash.sample_id,
ash.session_id,
ash.session_serial#
;

26
vdh/ash_query_freq.sql Normal file
View File

@@ -0,0 +1,26 @@
-- Execution statistics per plan for one SQL_ID from AWR (dba_hist_sqlstat):
-- total executions, total elapsed time and average elapsed time per execution.
set linesize 120
set pages 50000
column sql_id format a15 heading "sql id"
column plan_hash_value format 99999999999 heading "plan hash"
column total_exec format 999G999G999G999 heading "total|execs"
column total_elap format 999G999G999G999 heading "total elap|(µs)"
column avg_elap format 999G999G999D99 heading "avg elap|(µs)"
select
sql_id,
plan_hash_value,
sum(executions_delta) total_exec,
sum(elapsed_time_delta) total_elap,
-- nullif avoids ORA-01476 when snapshots recorded elapsed time but no
-- completed executions (avg is then reported as null)
(sum(elapsed_time_delta) / nullif(sum(executions_delta), 0)) avg_elap
from
dba_hist_sqlstat
where
sql_id = '&sql_id'
group by
sql_id,
plan_hash_value
order by
avg_elap desc nulls last
;

View File

@@ -0,0 +1,22 @@
-- ASM disk group overview: capacity, free space, space usable after
-- mirroring, allocation-unit size and compatibility settings.
set linesize 150
column compatibility format a15
column db_compatibility format a15
column au_size_mb format 99D99
column total_mb format 999G999G999D99
column free_mb format 999G999G999D99
column usable_file_mb format 9G999G999D99
column req_m_free_mb format 9G999G999D99
column name format a15
select name,
       type,
       sector_size,
       block_size,
       total_mb,
       free_mb,
       required_mirror_free_mb req_m_free_mb,
       usable_file_mb,
       (allocation_unit_size/1024/1024) au_size_mb,
       compatibility,
       database_compatibility db_compatibility
from v$asm_diskgroup
order by name;

14
vdh/asm_operations.sql Normal file
View File

@@ -0,0 +1,14 @@
-- Progress of running ASM operations (e.g. rebalance) per disk group.
select g.name,
       o.operation,
       o.state,
       o.sofar,
       o.est_work,
       o.est_minutes,
       power
from v$asm_diskgroup g
     join v$asm_operation o
       on o.group_number = g.group_number;

123
vdh/asm_queries.sql Normal file
View File

@@ -0,0 +1,123 @@
-- ASM alias-tree queries. All five walk the v$asm_alias hierarchy with
-- CONNECT BY: mod(parent_index, power(2,24)) = 0 identifies a disk group's
-- root directory entry, and the traversal starts at the directory whose name
-- matches &DATABASENAME. system_created = 'Y' / alias_directory = 'N'
-- restricts output to real ASM files rather than user aliases/directories.
-- generate sql to delete all asm files for a given database
column a format a120
set linesize 120
select 'alter diskgroup ' || gname || ' drop file ''+' || gname || sys_connect_by_path(aname, '/') || ''';' a
from ( select b.name gname, a.parent_index pindex, a.name aname,
a.reference_index rindex , a.system_created, a.alias_directory
from v$asm_alias a, v$asm_diskgroup b
where a.group_number = b.group_number
)
where alias_directory = 'N'
and system_created = 'Y'
start with (mod(pindex, power(2, 24))) = 0
and rindex in
( select a.reference_index
from v$asm_alias a, v$asm_diskgroup b
where a.group_number = b.group_number
and (mod(a.parent_index, power(2, 24))) = 0
and a.name = '&DATABASENAME'
)
connect by prior rindex = pindex;
-- generate sql to delete all the asm files of a given type for a given database
-- (file type as reported by v$asm_file.type, e.g. DATAFILE, ONLINELOG)
column a format a120
set linesize 120
select 'alter diskgroup ' || gname || ' drop file ''+' || gname || sys_connect_by_path(aname, '/') || ''';' a
from ( select b.name gname, a.parent_index pindex, a.name aname,
a.reference_index rindex , a.system_created, a.alias_directory,
c.type file_type
from v$asm_alias a, v$asm_diskgroup b, v$asm_file c
where a.group_number = b.group_number
and a.group_number = c.group_number(+)
and a.file_number = c.file_number(+)
and a.file_incarnation = c.incarnation(+)
)
where alias_directory = 'N'
and system_created = 'Y'
and file_type = '&FILETYPE'
start with (mod(pindex, power(2, 24))) = 0
and rindex in
( select a.reference_index
from v$asm_alias a, v$asm_diskgroup b
where a.group_number = b.group_number
and (mod(a.parent_index, power(2, 24))) = 0
and a.name = '&DATABASENAME'
)
connect by prior rindex = pindex;
-- generate sql to delete all the asm files EXCEPT those of a given type for a given database
column a format a120
set linesize 120
select 'alter diskgroup ' || gname || ' drop file ''+' || gname || sys_connect_by_path(aname, '/') || ''';' a
from ( select b.name gname, a.parent_index pindex, a.name aname,
a.reference_index rindex , a.system_created, a.alias_directory,
c.type file_type
from v$asm_alias a, v$asm_diskgroup b, v$asm_file c
where a.group_number = b.group_number
and a.group_number = c.group_number(+)
and a.file_number = c.file_number(+)
and a.file_incarnation = c.incarnation(+)
)
where alias_directory = 'N'
and system_created = 'Y'
and file_type != '&FILETYPE'
start with (mod(pindex, power(2, 24))) = 0
and rindex in
( select a.reference_index
from v$asm_alias a, v$asm_diskgroup b
where a.group_number = b.group_number
and (mod(a.parent_index, power(2, 24))) = 0
and a.name = '&DATABASENAME'
)
connect by prior rindex = pindex;
-- generate a list of all the asm files / directories / aliasses for a given database
column full_alias_path format a75
column file_type format a15
select concat('+'||gname, sys_connect_by_path(aname, '/')) full_alias_path,
system_created, alias_directory, file_type
from ( select b.name gname, a.parent_index pindex, a.name aname,
a.reference_index rindex , a.system_created, a.alias_directory,
c.type file_type
from v$asm_alias a, v$asm_diskgroup b, v$asm_file c
where a.group_number = b.group_number
and a.group_number = c.group_number(+)
and a.file_number = c.file_number(+)
and a.file_incarnation = c.incarnation(+)
)
start with (mod(pindex, power(2, 24))) = 0
and rindex in
( select a.reference_index
from v$asm_alias a, v$asm_diskgroup b
where a.group_number = b.group_number
and (mod(a.parent_index, power(2, 24))) = 0
and a.name = '&DATABASENAME'
)
connect by prior rindex = pindex;
-- generate a list of all the asm files of a given type for a given database
column full_alias_path format a70
column file_type format a15
select concat('+'||gname, sys_connect_by_path(aname, '/')) full_alias_path,
system_created, alias_directory, file_type
from ( select b.name gname, a.parent_index pindex, a.name aname,
a.reference_index rindex , a.system_created, a.alias_directory,
c.type file_type
from v$asm_alias a, v$asm_diskgroup b, v$asm_file c
where a.group_number = b.group_number
and a.group_number = c.group_number(+)
and a.file_number = c.file_number(+)
and a.file_incarnation = c.incarnation(+)
)
where alias_directory = 'N'
and system_created = 'Y'
and file_type = '&FILETYPE'
start with (mod(pindex, power(2, 24))) = 0
and rindex in
( select a.reference_index
from v$asm_alias a, v$asm_diskgroup b
where a.group_number = b.group_number
and (mod(a.parent_index, power(2, 24))) = 0
and a.name = '&DATABASENAME'
)
connect by prior rindex = pindex;

245
vdh/asmiostat.scriptje Normal file
View File

@@ -0,0 +1,245 @@
#!/bin/ksh
#
# NAME
# asmiostat.sh
#
# DESCRIPTION
# iostat-like output for ASM
# $ asmiostat.sh [-s ASM ORACLE_SID] [-h ASM ORACLE_HOME] [-g Diskgroup]
# [-f disk path filter] [<interval>] [<count>]
#
# NOTES
# Creates persistent SQL*Plus connection to the +ASM instance implemented
# as a ksh co-process
#
# AUTHOR
# Doug Utzig
#
# MODIFIED
# dutzig 08/18/05 - original version
#
# NOTE(review): ORACLE_SID and the NLS_* variables below are assigned but not
# exported; the sqlplus co-process only sees them when they are already in the
# environment - confirm before relying on the +ASM default.
ORACLE_SID=+ASM
NLS_LANG=AMERICAN_AMERICA.WE8ISO8859P1
NLS_DATE_FORMAT='DD-MON-YYYY HH24:MI:SS'
# sentinel line sqlplus prints after each batch; $$ makes it unique per run
endOfOutput="_EOP$$"
typeset -u diskgroup
typeset diskgroup_string="Disk Group: All diskgroups"
typeset usage="
$0 [-s ASM ORACLE_SID] [-h ASM ORACLE_HOME] [-g diskgroup] [<interval>] [<count>]
Output:
DiskPath - Path to ASM disk
DiskName - ASM disk name
Gr - ASM disk group number
Dsk - ASM disk number
Reads - Reads
Writes - Writes
AvRdTm - Average read time (in msec)
AvWrTm - Average write time (in msec)
KBRd - Kilobytes read
KBWr - Kilobytes written
AvRdSz - Average read size (in bytes)
AvWrSz - Average write size (in bytes)
RdEr - Read errors
WrEr - Write errors
"
while getopts ":s:h:g:f" option; do
case $option in
s) ORACLE_SID="$OPTARG" ;;
h) ORACLE_HOME="$OPTARG"
LD_LIBRARY_PATH=$ORACLE_HOME/lib:$LD_LIBRARY_PATH ;;
g) diskgroup="$OPTARG"
diskgroup_string="Disk Group: $diskgroup" ;;
f) print '-f option not implemented' ;;
:) print "Option $OPTARG needs a value"
print "$usage"
exit 1 ;;
\?) print "Invalid option $OPTARG"
print "$usage"
exit 1 ;;
esac
done
shift OPTIND-1
typeset -i interval=${1:-10} count=${2:-0} index=0
#
# Verify interval and count arguments are valid
#
(( interval <=0 || count<0 )) && {
print 'Invalid parameter: <interval> must be > 0; <count> must be >= 0'
print "$usage"
exit 1
}
#
# Query to run against v$asm_disk_stat
# (cumulative per-disk counters; the awk consumer turns them into deltas)
#
if [[ -z $diskgroup ]]; then
query="select group_number, disk_number, name, path, reads, writes, read_errs, write_errs, read_time, write_time, bytes_read, bytes_written from v\$asm_disk_stat where group_number>0 order by group_number, disk_number;"
else
query="select group_number, disk_number, name, path, reads, writes, read_errs, write_errs, read_time, write_time, bytes_read, bytes_written from v\$asm_disk_stat where group_number=(select group_number from v\$asm_diskgroup_stat where name=regexp_replace('$diskgroup','^\+','')) order by group_number, disk_number;"
fi
#
# Check for version 10.2 or later
#
typeset version minversion=10.2
version=$($ORACLE_HOME/bin/exp </dev/null 2>&1 | grep "Export: " | sed -e 's/^Export: Release \([0-9][0-9]*\.[0-9][0-9]*\).*/\1/')
# bc prints 1 when "version < minversion" holds; test that output rather than
# bc's exit status - bc exits 0 whether or not the comparison is true, so the
# original "if ! (... | bc >/dev/null)" test could never detect an old release
if [[ $(print "$version < $minversion" | bc 2>/dev/null) == 1 ]]; then
print "$0 requires Oracle Database Release $minversion or later"
exit 1
fi
#############################################################################
#
# Fatal error
# $1 - message; printed to stderr, then the script exits non-zero
#----------------------------------------------------------------------------
function fatalError {
print -u2 -- "Error: $1"
exit 1
}
#############################################################################
#
# Drain all of the sqlplus output - stop when we see our well known string
# $1 - 'dispose' (default) discards the co-process output; any other value
#      echoes each line up to (not including) the sentinel to stdout.
# Set QUERYDEBUG=ON in the environment to echo the raw co-process output.
#----------------------------------------------------------------------------
function drainOutput {
typeset dispose=${1:-'dispose'} output
while :; do
read -p output || fatalError 'Read from co-process failed [$0]'
if [[ $QUERYDEBUG == ON ]]; then print $output; fi
if [[ $output == $endOfOutput* ]]; then break; fi
[[ $dispose != 'dispose' ]] && print -- $output
done
}
#############################################################################
#
# Ensure the instance is running and it is of type ASM
# (asmcmd pwd prints "+" only when connected to an ASM instance)
#----------------------------------------------------------------------------
function verifyASMinstance {
typeset asmcmdPath=$ORACLE_HOME/bin/asmcmd
[[ ! -x $asmcmdPath ]] && fatalError "Invalid ORACLE_HOME $ORACLE_HOME: $asmcmdPath does not exist"
$asmcmdPath pwd 2>/dev/null | grep -q '^\+$' || fatalError "$ORACLE_SID is not an ASM instance"
}
#############################################################################
#
# Start the sqlplus coprocess
# Opens "sqlplus / as sysdba" as a ksh co-process (|&) and configures the
# session; all later communication goes through print -p / read -p.
#----------------------------------------------------------------------------
function startSqlplus {
# start sqlplus, setup the env
$ORACLE_HOME/bin/sqlplus -s '/ as sysdba' |&
print -p 'whenever sqlerror exit failure' \
&& print -p "set pagesize 9999 linesize 9999 feedback off heading off" \
&& print -p "prompt $endOfOutput" \
|| fatalError 'Write to co-process failed (startSqlplus)'
drainOutput dispose
}
#############################################################################
#############################################################################
# MAIN
#----------------------------------------------------------------------------
verifyASMinstance
startSqlplus
#
# Loop as many times as requested or forever
#
# Each pass sends the query to the sqlplus co-process, captures the raw rows
# and prints them followed by an "EOL" marker line. The awk consumer below
# turns consecutive samples of the cumulative v$asm_disk_stat counters into
# per-interval deltas; the first sample only primes the *Prev arrays and
# produces no output.
while :; do
print -p "$query" \
&& print -p "prompt $endOfOutput" \
|| fatalError 'Write to co-process failed (collectData)'
stats=$(drainOutput keep)
print -- "$stats\nEOL"
index=index+1
(( count<index && count>0 )) && break
sleep $interval
done | \
awk '
BEGIN { firstSample=1
}
/^EOL$/ {
firstSample=0; firstLine=1
next
}
{
path=$4
if (path ~ /^ *$/) next
group[path]=$1; disk[path]=$2; name[path]=$3
reads[path]=$5; writes[path]=$6
readErrors[path]=$7; writeErrors[path]=$8
readTime[path]=$9; writeTime[path]=$10
readBytes[path]=$11; writeBytes[path]=$12
# reads and writes
readsDiff[path]=reads[path]-readsPrev[path]
writesDiff[path]=writes[path]-writesPrev[path]
# read errors and write errors
readErrorsDiff[path]=readErrors[path]-readErrorsPrev[path]
writeErrorsDiff[path]=writeErrors[path]-writeErrorsPrev[path]
# read time and write time
readTimeDiff[path]=readTime[path]-readTimePrev[path]
writeTimeDiff[path]=writeTime[path]-writeTimePrev[path]
# average read time and average write time in msec (data provided in csec)
avgReadTime[path]=0; avgWriteTime[path]=0
if ( readsDiff[path] ) avgReadTime[path]=(readTimeDiff[path]/readsDiff[path])*1000
if ( writesDiff[path]) avgWriteTime[path]=(writeTimeDiff[path]/writesDiff[path])*1000
# bytes and KB read and bytes and KB written
readBytesDiff[path]=readBytes[path]-readBytesPrev[path]
writeBytesDiff[path]=writeBytes[path]-writeBytesPrev[path]
readKb[path]=readBytesDiff[path]/1024
writeKb[path]=writeBytesDiff[path]/1024
# average read size and average write size
avgReadSize[path]=0; avgWriteSize[path]=0
if ( readsDiff[path] ) avgReadSize[path]=readBytesDiff[path]/readsDiff[path]
if ( writesDiff[path] ) avgWriteSize[path]=writeBytesDiff[path]/writesDiff[path]
if (!firstSample) {
if (firstLine) {
"date" | getline now; close("date")
printf "\n"
printf "Date: %s Interval: %d secs %s\n\n", now, '"$interval"', "'"$diskgroup_string"'"
printf "%-40s %2s %3s %8s %8s %6s %6s %8s %8s %7s %7s %4s %4s\n", \
"DiskPath - DiskName","Gr","Dsk","Reads","Writes","AvRdTm",\
"AvWrTm","KBRd","KBWr","AvRdSz","AvWrSz", "RdEr", "WrEr"
firstLine=0
}
printf "%-40s %2s %3s %8d %8d %6.1f %6.1f %8d %8d %7d %7d %4d %4d\n", \
path " - " name[path], group[path], disk[path], \
readsDiff[path], writesDiff[path], \
avgReadTime[path], avgWriteTime[path], \
readKb[path], writeKb[path], \
avgReadSize[path], avgWriteSize[path], \
readErrorsDiff[path], writeErrorsDiff[path]
}
readsPrev[path]=reads[path]; writesPrev[path]=writes[path]
readErrorsPrev[path]=readErrors[path]; writeErrorsPrev[path]=writeErrors[path]
readTimePrev[path]=readTime[path]; writeTimePrev[path]=writeTime[path]
readBytesPrev[path]=readBytes[path]; writeBytesPrev[path]=writeBytes[path]
}
END {
}
'
exit 0
BIN
vdh/asmiostat.zip Normal file

Binary file not shown.

13
vdh/avg_archived_size.sql Normal file
View File

@@ -0,0 +1,13 @@
-- Report the average amount of archived redo (MB) generated per day over the
-- last month, excluding today, together with the measured period.
select instance_name from v$instance;
-- dag = day; inner view aggregates archived MB per day from v$archived_log
select to_char(min(dag), 'DD/MM/YYYY HH24:MI:SS') start_day, to_char(max(dag) + 1 - 1/(24*60*60), 'DD/MM/YYYY HH24:MI:SS') end_day,
(max(dag) - min(dag) + 1) days_between,
to_char(avg(gen_archived_size),'9G999G999D99') avg_archived_per_day
from ( select trunc(completion_time) dag, sum(blocks * block_size)/1024/1024 gen_archived_size
from v$archived_log
where months_between(trunc(sysdate), trunc(completion_time)) <= 1
and completion_time < trunc(sysdate)
group by trunc(completion_time)
);
exit

3
vdh/avgskew.sql Normal file
View File

@@ -0,0 +1,3 @@
-- Average pk_col over the rows of kso.skew with positive col1.
-- NOTE(review): kso.skew is presumably a demo/test table used by these
-- scripts as a deliberately long-running statement - confirm before reuse.
select avg(pk_col) from kso.skew
where col1 > 0
/

View File

@@ -0,0 +1,85 @@
-- AWR-based report of physical I/O rates per snapshot interval:
-- one row per instance per interval, with read/write rates converted to MB/s.
-- NOTE(review): the time window is hard-coded below (01/10/2019 - 15/10/2019);
-- edit the two to_timestamp literals before running.
set linesize 500
set pagesize 9999
alter session set nls_numeric_characters=',.';
set trimspool on
set tab off
set verify off
set feedback off
column instance_number format 99
column bsnap_id format 9999999
column bsnap_time format a16
column esnap_id format 9999999
column esnap_time format a16
column phy_read_total_bytes format 9G999G999G999
column phy_read_bytes format 9G999G999G999
column phy_write_total_bytes format 9G999G999G999
column phy_write_bytes format 9G999G999G999
column phy_read_total_mb format 999G999D99
column phy_read_mb format 999G999D99
column phy_write_total_mb format 999G999D99
column phy_write_mb format 999G999D99
column phy_io_total_mb format 999G999D99
column phy_io_mb format 999G999D99
-- p: each snapshot paired with the previous snap_id of the same
-- dbid/instance (lag() defaults to the snapshot itself on the first row,
-- which yields a zero-delta row rather than an error).
WITH p as
( select dbid, instance_number, snap_id,
lag(snap_id, 1, snap_id) over
( partition by dbid, instance_number
order by snap_id
) prev_snap_id,
begin_interval_time, end_interval_time
from dba_hist_snapshot
where begin_interval_time between
to_timestamp ('01/10/2019 00:00', 'DD/MM/YYYY HH24:MI')
and to_timestamp ('15/10/2019 00:00', 'DD/MM/YYYY HH24:MI')
),
-- s: per interval and per statistic, delta of the cumulative sysstat value
-- divided by the interval length in seconds => a per-second rate.
s as
( select d.name database, p.dbid, p.instance_number, p.prev_snap_id bsnap_id, p.snap_id esnap_id,
p.begin_interval_time bsnap_time, p.end_interval_time esnap_time, bs.stat_name,
round((es.value-bs.value)/( extract(second from (p.end_interval_time - p.begin_interval_time))
+ extract(minute from (p.end_interval_time - p.begin_interval_time)) * 60
+ extract(hour from (p.end_interval_time - p.begin_interval_time)) * 60 * 60
+ extract(day from (p.end_interval_time - p.begin_interval_time)) * 24 * 60 * 60
)
,6
) valuepersecond
from v$database d, p,
dba_hist_sysstat bs, dba_hist_sysstat es
where d.dbid = p.dbid
and ( p.dbid = bs.dbid
and p.instance_number = bs.instance_number
and p.prev_snap_id = bs.snap_id
)
and ( p.dbid = es.dbid
and p.instance_number = es.instance_number
and p.snap_id = es.snap_id
)
and ( bs.stat_id = es.stat_id
and bs.instance_number = es.instance_number
and bs.stat_name=es.stat_name
)
and bs.stat_name in
( 'physical read total bytes','physical read bytes','physical write total bytes','physical write bytes')
),
-- g: pivot the four statistics into one row per interval.
g as
( select /*+ FIRST_ROWS */
database, instance_number, bsnap_id, esnap_id, bsnap_time, esnap_time,
sum(decode( stat_name, 'physical read total bytes' , valuepersecond, 0 )) phy_read_total_bytes,
sum(decode( stat_name, 'physical read bytes' , valuepersecond, 0 )) phy_read_bytes,
sum(decode( stat_name, 'physical write total bytes' , valuepersecond, 0 )) phy_write_total_bytes,
sum(decode( stat_name, 'physical write bytes' , valuepersecond, 0 )) phy_write_bytes
from s
group by database,instance_number, bsnap_id, esnap_id, bsnap_time, esnap_time
)
-- final projection: bytes/s -> MB/s; the "total" statistics include all I/O
-- paths while the non-total ones cover buffer-cache I/O only (per the
-- Oracle Reference descriptions of these sysstat names).
select instance_number, bsnap_id, to_char(bsnap_time,'DD-MON-YY HH24:MI') bsnap_time_str, esnap_id, to_char(esnap_time,'DD-MON-YY HH24:MI') esnap_time_str,
phy_read_total_bytes/1024/1024 phy_read_total_mb,
phy_read_bytes/1024/1024 phy_read_mb,
phy_write_total_bytes/1024/1024 phy_write_total_mb,
phy_write_bytes/1024/1024 phy_write_mb,
(phy_read_total_bytes + phy_write_total_bytes) /1024/1024 phy_io_total_mb,
(phy_read_bytes + phy_write_bytes) /1024/1024 phy_io_mb
from g
order by instance_number, bsnap_time;

202
vdh/awr_load_history.sql Normal file
View File

@@ -0,0 +1,202 @@
/* AWR-based load-history report (the dba_hist_* equivalent of the
   statspack / AWR 'Load Profile' section).
   For every snapshot interval in a prompted time window it spools one
   semicolon-separated line per instance containing per-second rates for:
   DB time, redo blocks written, logical reads, block changes, physical
   reads, physical writes, user calls, parses, hard parses, sorts, logons,
   executes and transactions, plus derived ratios:
   Blocks changed per Read %
   Recursive Call %
   Rollback per transaction %
   Rows per Sort
   CPU seconds (total / recursive / parse)
   undo records applied and user rollbacks.
   Use it to spot busy periods quickly, then run an AWR report on the
   interesting snap_id range for details.
   Begin date, end date and spool file are prompted for at run time.
*/
set linesize 500
set pagesize 9999
alter session set nls_numeric_characters=',.';
set trimspool on
set tab off
set verify off
set feedback off
column instance_number format 99
column bsnap_id format 9999999
column bsnap_time format a16
column esnap_id format 9999999
column esnap_time format a16
column redo_blocks format 999999999990D999
column logical_reads format 999999999990D999
column block_changes format 999999999990D999
column physical_reads format 999999999990D999
column physical_writes format 999999999990D999
column user_calls format 999999999990D999
column parses format 999999999990D999
column hard_parses format 999999999990D999
column sorts format 999999999990D999
column logons format 999999999990D999
column executes format 999999999990D999
column transactions format 999999999990D999
column changes_per_read format a8
column recursive format a8
column rollback format a8
column rows_per_sort format a18
column cpusecs_pct format 99990D00
column rec_cpusecs_pct format 99990D00
column parse_cpusecs_pct format 99990D00
column buffer_hit format a8
column undo_records format 99999999999999999
column rollbacks format 99999999999999999
column a format a500
prompt Enter the begindate in the format DD/MM/YYYY HH24:MI
accept start_time prompt 'begin date: '
prompt Enter the enddate in the format DD/MM/YYYY HH24:MI
accept end_time prompt 'end date: '
prompt Enter the spoolfile
accept spoolfile prompt 'spool file: '
spool &spoolfile
-- p: each snapshot paired with the previous snap_id of the same dbid/instance.
WITH p as
( select dbid, instance_number, snap_id,
lag(snap_id, 1, snap_id) over
( partition by dbid, instance_number
order by snap_id
) prev_snap_id,
begin_interval_time, end_interval_time
from dba_hist_snapshot
where begin_interval_time between
to_timestamp ('&start_time', 'DD/MM/YYYY HH24:MI')
and to_timestamp ('&end_time', 'DD/MM/YYYY HH24:MI')
),
-- s: per interval and per statistic, the delta of the cumulative value
-- divided by the interval length in seconds (per-second rate).
s as
( select d.name database, p.dbid, p.instance_number, p.prev_snap_id bsnap_id, p.snap_id esnap_id,
p.begin_interval_time bsnap_time, p.end_interval_time esnap_time, bs.stat_name,
round((es.value-bs.value)/( extract(second from (p.end_interval_time - p.begin_interval_time))
+ extract(minute from (p.end_interval_time - p.begin_interval_time)) * 60
+ extract(hour from (p.end_interval_time - p.begin_interval_time)) * 60 * 60
+ extract(day from (p.end_interval_time - p.begin_interval_time)) * 24 * 60 * 60
)
,6
) valuepersecond
from v$database d, p,
dba_hist_sysstat bs, dba_hist_sysstat es
where d.dbid = p.dbid
and ( p.dbid = bs.dbid
and p.instance_number = bs.instance_number
and p.prev_snap_id = bs.snap_id
)
and ( p.dbid = es.dbid
and p.instance_number = es.instance_number
and p.snap_id = es.snap_id
)
and ( bs.stat_id = es.stat_id
and bs.instance_number = es.instance_number
and bs.stat_name=es.stat_name
)
and bs.stat_name in
( 'redo size','redo blocks written','session logical reads','db block changes','physical reads','physical writes','user calls',
'parse count (total)','parse count (hard)','sorts (memory)','sorts (disk)','logons cumulative','execute count','user rollbacks',
'user commits', 'recursive calls','sorts (rows)','CPU used by this session','recursive cpu usage','parse time cpu',
'rollback changes - undo records applied', 'DB time', 'Read IO (MB)', 'Write IO (MB)'
)
),
-- g: pivot the statistics into one row per interval; CPU statistics are in
-- centiseconds, hence the /100 to get seconds.
g as
( select /*+ FIRST_ROWS */
database, instance_number, bsnap_id, esnap_id, bsnap_time, esnap_time,
sum(decode( stat_name, 'redo size' , valuepersecond, 0 )) redo_size,
sum(decode( stat_name, 'redo blocks written' , valuepersecond, 0 )) redo_blocks,
sum(decode( stat_name, 'session logical reads' , valuepersecond, 0 )) logical_reads,
sum(decode( stat_name, 'db block changes' , valuepersecond, 0 )) block_changes,
sum(decode( stat_name, 'physical reads' , valuepersecond, 0 )) physical_reads ,
sum(decode( stat_name, 'physical writes' , valuepersecond, 0 )) physical_writes,
sum(decode( stat_name, 'user calls' , valuepersecond, 0 )) user_calls,
sum(decode( stat_name, 'recursive calls' , valuepersecond, 0 )) recursive_calls,
sum(decode( stat_name, 'parse count (total)' , valuepersecond, 0 )) parses ,
sum(decode( stat_name, 'parse count (hard)' , valuepersecond, 0 )) hard_parses ,
sum(decode( stat_name, 'sorts (rows)' , valuepersecond, 0 )) sort_rows ,
sum(decode( stat_name, 'sorts (memory)' , valuepersecond,
'sorts (disk)' , valuepersecond, 0 )) sorts ,
sum(decode( stat_name, 'logons cumulative' , valuepersecond, 0 )) logons ,
sum(decode( stat_name, 'execute count' , valuepersecond, 0 )) executes ,
sum(decode( stat_name, 'user rollbacks' , valuepersecond,
'user commits' , valuepersecond, 0 )) transactions,
sum(decode( stat_name, 'user rollbacks' , valuepersecond, 0 )) rollbacks,
sum(decode( stat_name, 'rollback changes - undo records applied' , valuepersecond, 0 )) undo_records,
sum(decode( stat_name, 'CPU used by this session' , valuepersecond/100, 0 )) cpusecs,
sum(decode( stat_name, 'recursive cpu usage' , valuepersecond/100, 0 )) rec_cpusecs,
sum(decode( stat_name, 'parse time cpu' , valuepersecond/100, 0 )) parse_cpusecs,
sum(decode( stat_name, 'DB time' , valuepersecond, 0 )) db_time,
sum(decode( stat_name, 'Read IO (MB)' , valuepersecond, 0 )) read_io_mb,
sum(decode( stat_name, 'Write IO (MB)' , valuepersecond, 0 )) write_io_mb
from s
group by database,instance_number, bsnap_id, esnap_id, bsnap_time, esnap_time
)
-- The block comment below is the columnar (human-readable) variant of the
-- output; the active query emits one semicolon-separated string per row
-- for easy import into a spreadsheet.
/*select instance_number, bsnap_id, to_char(bsnap_time,'DD-MON-YY HH24:MI') bsnap_time_str, esnap_id, to_char(esnap_time,'DD-MON-YY HH24:MI') esnap_time_str,read_io_mb, write_io_mb
db_time, redo_blocks, logical_reads, block_changes, physical_reads,
physical_writes, user_calls, parses, hard_parses, sorts, logons, executes, transactions,
to_char(100 * (block_changes / decode(logical_reads,0,1,logical_reads)),'990D00')||'%' changes_per_read,
to_char(100 * (recursive_calls / decode(user_calls + recursive_calls, 0, 1,user_calls + recursive_calls)),'990D00') ||'%' recursive,
to_char(100 * (rollbacks / decode(transactions,0,1,transactions)),'990D00') ||'%' rollback,
to_char(decode(sorts, 0, NULL, (sort_rows/sorts)),'99999999999999999') rows_per_sort,
100 * cpusecs cpusecs_pct,
100 * rec_cpusecs rec_cpusecs_pct,
100 * parse_cpusecs parse_cpusecs_pct,
to_char(100 * (1 - physical_reads / decode(logical_reads, 0, 1,logical_reads)),'990D00') ||'%' buffer_hit,
undo_records, rollbacks
*/select
(instance_number ||';'||
bsnap_id ||';'||
to_char(bsnap_time,'DD-MON-YY HH24:MI') ||';'||
esnap_id ||';'||
to_char(esnap_time,'DD-MON-YY HH24:MI') ||';'||
db_time ||';' ||
redo_blocks ||';'||
logical_reads ||';'||
block_changes ||';'||
physical_reads ||';'||
physical_writes ||';'||
user_calls ||';'||
parses ||';'||
hard_parses ||';'||
sorts ||';'||
logons ||';'||
executes ||';'||
transactions ||';'||
to_char(100 * (block_changes / decode(logical_reads,0,1,logical_reads)),'990D00')||'%' ||';'||
to_char(100 * (recursive_calls / decode(user_calls + recursive_calls, 0, 1,user_calls + recursive_calls)),'990D00') ||'%' ||';'||
to_char(100 * (rollbacks / decode(transactions,0,1,transactions)),'990D00') ||'%' ||';'||
to_char(decode(sorts, 0, NULL, (sort_rows/sorts)),'99999999999999999') ||';'||
100 * cpusecs ||';'||
100 * rec_cpusecs ||';'||
100 * parse_cpusecs ||';'||
-- to_char(100 * (1 - physical_reads / decode(logical_reads, 0, 1,logical_reads)),'990D00') ||'%' ||';'||
undo_records ||';'||
rollbacks
) a
from g
order by instance_number, bsnap_time;
spool off
set feedback on

100
vdh/awr_long_queries.sql Normal file
View File

@@ -0,0 +1,100 @@
-- Report AWR-captured SQL statements whose elapsed time in a single snapshot
-- interval exceeded 10 minutes since yesterday 19:00, excluding system /
-- monitoring schemas and recursive tuning-advisor and job wrapper SQL.
set linesize 500
set pages 50000
set long 50000
--set markup html on
-- NOTE: column names must match the query aliases below (they were previously
-- misspelled as total_quries/distinct_quries, so the formats never applied).
column total_queries format 9G999G999
column distinct_queries format 9G999G999
column inst_nbr format 999
column begin_interval_time_str format a20
column end_interval_time_str format a20
column module format a20
column action format a15
column sql_profile format a15
column parsing_schema_name format a30
column fetches_delta_str format a14
column sorts_delta_str format a14
column exec_delta_str format a14
column px_exec_delta_str format a14
column disk_reads_delta_str format a14
column buffer_gets_delta_str format a14
column cpu_sec_str format a17
column elaps_sec_str format a17
column sql_text format a500 word_wrapped
--spool awr_queries_longer_than_10_minutes.html
-- Summary: number of long-running statement/interval combinations and the
-- number of distinct statements involved. (No ORDER BY here: ordering a
-- single-row aggregate by a detail column raises ORA-00937.)
select
count(*) total_queries,
count(distinct stat.sql_id) distinct_queries
from
dba_hist_snapshot snap
join dba_hist_sqlstat stat
on ( snap.dbid = stat.dbid
and snap.instance_number = stat.instance_number
and snap.snap_id = stat.snap_id
)
join dba_hist_sqltext sqlt
on ( stat.dbid = sqlt.dbid
and stat.sql_id = sqlt.sql_id
)
where
snap.begin_interval_time > trunc(sysdate) - 1 + 19/24
and stat.parsing_schema_name not in
('SYS','SYSMAN','MDSYS','WKSYS', 'NAGIORA', 'PANDORA'
)
and sql_text not like '%/* SQL Analyze(%'
and sql_text not like 'DECLARE job BINARY_INTEGER%'
-- longer than 10 minutes (elapsed_time_delta is in microseconds)
and stat.elapsed_time_delta > 10 * 60 * 1000000
;
-- Detail: one row per statement per snapshot interval, slowest first.
select
snap.instance_number inst_nbr,
to_char(snap.begin_interval_time, 'DD/MM/YYYY HH24:MI:SS') begin_interval_time_str,
to_char(snap.end_interval_time, 'DD/MM/YYYY HH24:MI:SS') end_interval_time_str,
stat.sql_id,
stat.plan_hash_value,
to_char(stat.elapsed_time_delta/1000000, '9G999G999G999D99') elaps_sec_str,
to_char(stat.cpu_time_delta/1000000, '9G999G999G999D99') cpu_sec_str,
stat.module,
stat.action,
stat.sql_profile,
stat.parsing_schema_name,
to_char(stat.fetches_delta, '9G999G999G999') fetches_delta_str,
to_char(stat.sorts_delta, '9G999G999G999') sorts_delta_str,
to_char(stat.executions_delta, '9G999G999G999') exec_delta_str,
to_char(stat.px_servers_execs_delta, '9G999G999G999') px_exec_delta_str,
to_char(stat.disk_reads_delta, '9G999G999G999') disk_reads_delta_str,
to_char(stat.buffer_gets_delta, '9G999G999G999') buffer_gets_delta_str,
sqlt.sql_text
from
dba_hist_snapshot snap
join dba_hist_sqlstat stat
on ( snap.dbid = stat.dbid
and snap.instance_number = stat.instance_number
and snap.snap_id = stat.snap_id
)
join dba_hist_sqltext sqlt
on ( stat.dbid = sqlt.dbid
and stat.sql_id = sqlt.sql_id
)
where
snap.begin_interval_time > trunc(sysdate) - 1 + 19/24
and stat.parsing_schema_name not in
('SYS','SYSMAN','MDSYS','WKSYS', 'NAGIORA', 'PANDORA'
)
and sql_text not like '%/* SQL Analyze(%'
and sql_text not like 'DECLARE job BINARY_INTEGER%'
-- longer than 10 minutes
and stat.elapsed_time_delta > 10 * 60 * 1000000
order by
stat.elapsed_time_delta desc
;
--spool off
set markup html off

View File

@@ -0,0 +1,58 @@
-- Base specific query, to report on the used space by the backup pieces, split per ZFS pool
-- order by pool, db name, db unique name and dbid
-- the reported sizes are the sizes seen by rman, thus before any filesystem compression or deduplication
-- this query must be run against the rman catalog repository
set linesize 150
set pages 50000
column name format a10
column db_unique_name format a30
column pool format a5
column GB format 9G999G999D99
compute sum of GB on pool
compute sum of GB on report
break on pool skip 1 on report
-- backup_files: every disk backup piece with the pool name parsed out of its
-- handle (file name).
with backup_files
as
( select
db_key,
site_key,
bytes,
-- pool name: the 2nd '_'-delimited token of the 2nd path component of the
-- handle. NOTE(review): assumes handles look like /<mnt>/<xx>_<pool>/...;
-- verify against the actual backup piece naming convention.
replace(regexp_substr(regexp_substr(bpd.handle,'/[^/]+/',2),'_[^_]+',1,2),'_') pool
from
rc_backup_piece_details bpd
where
device_type = 'DISK'
)
-- Sum the piece sizes per pool / database / db_unique_name, reported in GB.
select
bf.pool,
db.dbid,
db.name,
st.db_unique_name,
sum(bf.bytes)/1024/1024/1024 GB
from
backup_files bf,
rc_site st,
rc_database db
where
bf.site_key = st.site_key
and db.db_key = bf.db_key
group by
bf.pool,
db.dbid,
db.name,
st.db_unique_name
order by
bf.pool,
db.name,
st.db_unique_name,
db.dbid
;
clear breaks
clear computes

View File

@@ -0,0 +1,58 @@
-- Base specific query, to report on the used space by the backup pieces, split per ZFS pool
-- order by db name, db unique name, dbid and pool
-- the reported sizes are the sizes seen by rman, thus before any filesystem compression or deduplication
-- this query must be run against the rman catalog repository
set linesize 150
set pages 50000
column name format a10
column db_unique_name format a30
column pool format a5
column GB format 9G999G999D99
compute sum of GB on db_unique_name
compute sum of GB on report
break on dbid on name on db_unique_name skip 1 on pool on report
-- backup_files: every disk backup piece with the pool name parsed out of its
-- handle (file name).
with backup_files
as
( select
db_key,
site_key,
bytes,
-- pool name: the 2nd '_'-delimited token of the 2nd path component of the
-- handle. NOTE(review): assumes handles look like /<mnt>/<xx>_<pool>/...;
-- verify against the actual backup piece naming convention.
replace(regexp_substr(regexp_substr(bpd.handle,'/[^/]+/',2),'_[^_]+',1,2),'_') pool
from
rc_backup_piece_details bpd
where
device_type = 'DISK'
)
-- Sum the piece sizes per database / db_unique_name / pool, reported in GB.
select
db.dbid,
db.name,
st.db_unique_name,
bf.pool,
sum(bf.bytes)/1024/1024/1024 GB
from
backup_files bf,
rc_site st,
rc_database db
where
bf.site_key = st.site_key
and db.db_key = bf.db_key
group by
db.dbid,
db.name,
st.db_unique_name,
bf.pool
order by
db.name,
st.db_unique_name,
db.dbid,
bf.pool
;
clear breaks
clear computes

View File

@@ -0,0 +1,143 @@
-- Via database links to the DWHPRD databases (X3DM and default), list the
-- owners of DATABASE LINK objects on those remote databases that do NOT hold
-- the CREATE DATABASE LINK privilege - neither granted directly nor through
-- any (nested) role, resolved recursively with CONNECT BY over sys.sysauth$.
-- Owners listed in uptdba.exclude_oracle_users are ignored.
with
-- grantees_to: for each db-link owner on DWHPRD_X3DM, every role reachable
-- from that user (plus the user itself), as (username, role-or-self name).
grantees_to as
( select
distinct
connect_by_root(usr.name) username,
r_usr.name name
from
sys.sysauth$@DWHPRD_X3DM_LNK sau,
sys.user$@DWHPRD_X3DM_LNK r_usr,
sys.user$@DWHPRD_X3DM_LNK usr
where
sau.privilege# = r_usr.user#
and sau.grantee# = usr.user#
connect by
prior privilege# = grantee#
start with grantee# in
( select
user#
from
sys.user$@DWHPRD_X3DM_LNK
where
name in
( select
owner
from
dba_objects@DWHPRD_X3DM_LNK
where
object_type = 'DATABASE LINK'
)
)
union all
select
distinct
owner,
owner
from
dba_objects@DWHPRD_X3DM_LNK
where
object_type = 'DATABASE LINK'
),
-- users_to: db-link owners on DWHPRD_X3DM minus those that have
-- CREATE DATABASE LINK through any of the names collected above.
users_to as
( select
distinct
owner
from
dba_objects@DWHPRD_X3DM_LNK a
where
object_type = 'DATABASE LINK'
and not exists
( select
null
from
uptdba.exclude_oracle_users b
where
b.user_name = a.owner
)
minus
select
grt.username
from
grantees_to grt,
dba_sys_privs@DWHPRD_X3DM_LNK sp
where
grt.name = sp.grantee
and privilege = 'CREATE DATABASE LINK'
),
-- grantees_from / users_from: the same analysis against DWHPRD.
grantees_from as
( select
distinct
connect_by_root(usr.name) username,
r_usr.name name
from
sys.sysauth$@DWHPRD_LNK sau,
sys.user$@DWHPRD_LNK r_usr,
sys.user$@DWHPRD_LNK usr
where
sau.privilege# = r_usr.user#
and sau.grantee# = usr.user#
connect by
prior privilege# = grantee#
start with grantee# in
( select
user#
from
sys.user$@DWHPRD_LNK
where
name in
( select
owner
from
dba_objects@DWHPRD_LNK
where
object_type = 'DATABASE LINK'
)
)
union all
select
distinct
owner,
owner
from
dba_objects@DWHPRD_LNK
where
object_type = 'DATABASE LINK'
),
users_from as
( select
distinct
owner
from
dba_objects@DWHPRD_LNK a
where
object_type = 'DATABASE LINK'
and not exists
( select
null
from
uptdba.exclude_oracle_users b
where
b.user_name = a.owner
)
minus
select
grt.username
from
grantees_from grt,
dba_sys_privs@DWHPRD_LNK sp
where
grt.name = sp.grantee
and privilege = 'CREATE DATABASE LINK'
)
-- Combined, de-duplicated list of offending owners from both databases.
select
owner
from
users_to
union
select
owner
from
users_from
order by
owner
;

View File

@@ -0,0 +1,58 @@
-- Same analysis as the PRD variant, against DWHUAT and DWHPRD_X3DM: list the
-- owners of DATABASE LINK objects on the remote databases that do NOT hold the
-- CREATE DATABASE LINK privilege (directly or via any nested role, resolved
-- with CONNECT BY over sys.sysauth$), excluding uptdba.exclude_oracle_users.
with
-- grantees_to: for each db-link owner on DWHUAT, every role reachable from
-- that user (plus the user itself).
grantees_to as
( select distinct connect_by_root(usr.name) username, r_usr.name name
from sys.sysauth$@DWHUAT_LNK sau, sys.user$@DWHUAT_LNK r_usr, sys.user$@DWHUAT_LNK usr
where sau.privilege# = r_usr.user#
and sau.grantee# = usr.user#
connect by
prior privilege# = grantee#
start with grantee# in
( select user#
from sys.user$@DWHUAT_LNK
where name in (select owner from dba_objects@DWHUAT_LNK where object_type = 'DATABASE LINK'))
union all
select distinct owner, owner
from dba_objects@DWHUAT_LNK
where object_type = 'DATABASE LINK'
),
-- users_to: db-link owners on DWHUAT lacking CREATE DATABASE LINK.
users_to as
( select distinct owner
from dba_objects@DWHUAT_LNK a
where object_type = 'DATABASE LINK'
and not exists (select null from uptdba.exclude_oracle_users b where b.user_name = a.owner)
minus
select grt.username
from grantees_to grt, dba_sys_privs@DWHUAT_LNK sp
where grt.name = sp.grantee
and privilege = 'CREATE DATABASE LINK'
),
-- grantees_from / users_from: the same analysis against DWHPRD_X3DM.
grantees_from as
( select distinct connect_by_root(usr.name) username, r_usr.name name
from sys.sysauth$@DWHPRD_X3DM_LNK sau, sys.user$@DWHPRD_X3DM_LNK r_usr, sys.user$@DWHPRD_X3DM_LNK usr
where sau.privilege# = r_usr.user#
and sau.grantee# = usr.user#
connect by
prior privilege# = grantee#
start with grantee# in (select user# from sys.user$@DWHPRD_X3DM_LNK where name in (select owner from dba_objects@DWHPRD_X3DM_LNK where object_type = 'DATABASE LINK'))
union all
select distinct owner, owner
from dba_objects@DWHPRD_X3DM_LNK
where object_type = 'DATABASE LINK'
),
users_from as
( select distinct owner
from dba_objects@DWHPRD_X3DM_LNK a
where object_type = 'DATABASE LINK'
and not exists (select null from uptdba.exclude_oracle_users b where b.user_name = a.owner)
minus
select grt.username
from grantees_from grt, dba_sys_privs@DWHPRD_X3DM_LNK sp
where grt.name = sp.grantee
and privilege = 'CREATE DATABASE LINK'
)
-- Combined, de-duplicated list from both databases.
select owner
from users_to
union
select owner
from users_from
order by owner;

View File

@@ -0,0 +1 @@
# Start an EM dbconsole for every running Oracle instance on this host.
# The SID is the 3rd '_'-separated field of the pmon process name
# (ora_pmon_<SID>); ORACLE_UNQNAME is the SID with any digits stripped
# (instance-numbered SIDs on RAC).
# FIX: '[:digit:]' must be quoted - unquoted it is a shell glob pattern and
# may expand against files in the current directory.
for i in $(ps -ef | grep pmon | grep -v grep | cut -d'_' -f3); do export ORACLE_SID=$i; export ORACLE_UNQNAME=$(echo "$i" | tr -d '[:digit:]'); emctl start dbconsole; done

View File

@@ -0,0 +1,63 @@
-- Ad-hoc full-scan throughput benchmark: flush the buffer cache, force serial
-- direct path reads, full-scan one large table and report the physical read
-- volume, elapsed time and resulting MB/s as measured by this session's
-- 'physical read bytes' statistic.
set lines 150
set pages 9999
set feedback off
set echo off
-- show our own SID so the run can be traced/monitored from another session
select distinct sid from v$mystat;
alter system flush buffer_cache;
-- undocumented parameter: force direct path reads even for serial scans
alter session set "_serial_direct_read"='always';
--exec dbms_monitor.session_trace_enable(waits => true)
DECLARE
l_cnt number; -- row count (discarded; only forces the scan)
l_start_value number; -- 'physical read bytes' before the scan
l_end_value number; -- 'physical read bytes' after the scan
l_diff number; -- bytes physically read by the scan
l_start timestamp;
l_end timestamp;
l_elapsed number; -- wall-clock seconds
l_throughput number; -- MB/s
BEGIN
select value
into l_start_value
from v$mystat mystat, v$statname statname
where mystat.statistic# = statname.statistic#
and statname.name = 'physical read bytes';
l_start := systimestamp;
-- pick exactly one of the scan variants below
select /*+ FULL(A) PARALLEL(A,10) */ count(*) into l_cnt from sysadm.COMLONG A;
-- select /*+ FULL(A) NOPARALLEL(A) */ count(*) into l_cnt from sysadm.COMLONG A;
-- select /*+ FULL(A) */ count(*) into l_cnt from c2mv5.hist_act A;
l_end := systimestamp;
select value
into l_end_value
from v$mystat mystat, v$statname statname
where mystat.statistic# = statname.statistic#
and statname.name = 'physical read bytes';
-- interval -> seconds (day/hour/minute/second components summed)
l_elapsed := extract(day from (l_end - l_start)) * 24 * 60 * 60
+ extract(hour from (l_end - l_start)) * 60 * 60
+ extract(minute from (l_end - l_start)) * 60
+ extract(second from (l_end - l_start));
l_diff := (l_end_value - l_start_value);
-- NOTE(review): raises ZERO_DIVIDE if the scan completes in under the
-- timestamp resolution (l_elapsed = 0) - unlikely for a large table.
l_throughput := (l_diff/l_elapsed)/1024/1024;
dbms_output.put_line('physical MB read: ' || to_char(l_diff/1024/1024, '999G999G999D99'));
dbms_output.put_line('elapsed seconds: ' || to_char(l_elapsed, '9G999G999D99'));
dbms_output.put_line('measured throughput: ' || to_char(l_throughput, '999G999D99'));
END;
/
--exec dbms_monitor.session_trace_disable();

27
vdh/blocked_sessions.sql Normal file
View File

@@ -0,0 +1,27 @@
-- List every session (cluster-wide) that is currently blocked by another
-- session, with the blocker's instance/sid and the components of the row
-- being waited on (object#, file#, block#, row#).
set linesize 150
column username format a20
column bi format 99
column bs format 9999
column siw format 999999
column rwo# format 999999
select
inst_id,
sid,
serial#,
username,
sql_id,
blocking_instance bi,
blocking_session bs,
seconds_in_wait siw,
row_wait_obj# rwo#,
row_wait_file# rwf#,
row_wait_block# rwb#,
row_wait_row# rwr#
from
gv$session
where
blocking_session is not null;
-- The waited-on row can be converted into a rowid with
-- DBMS_ROWID.ROWID_CREATE(rowid_type, object_number, relative_fno,
--                         block_number, row_number), e.g.:
--   select dbms_rowid.rowid_create(1, 81574, 26, 286, 262) from dual;

44
vdh/blocked_sessions2.sql Normal file
View File

@@ -0,0 +1,44 @@
-- Blocker/waiter tree (cluster-wide): shows each blocking chain as a
-- hierarchy, the root blocker first and its waiters indented with dashes.
set linesize 150
column username format a20
column bi format 99
column bs format 9999
column siw format 999999
column rwo# format 99999999
column tl format a5
column inst_id format a10
-- snapshot gv$session once (MATERIALIZE) so the hierarchical query and the
-- IN-subquery below see a consistent picture.
with sessions
as ( select /*+ MATERIALIZE */
inst_id, sid, serial#, username, sql_id, blocking_instance bi, blocking_session bs,
seconds_in_wait siw, row_wait_obj# rwo#, row_wait_file# rwf#, row_wait_block# rwb#,
row_wait_row# rwr#
from gv$session
)
-- keep only sessions that are waiting (bs not null) or that block someone;
-- indent each row by its depth in the blocking chain.
select lpad('-', level, '-') || inst_id inst_id, sid, serial#, username, sql_id, bi, bs, siw, rwo#,
rwf#, rwb#, rwr#
from sessions
where bs is not null
or (inst_id, sid) in
( select bi, bs
from sessions
)
start with bs is null
connect by ( bi = prior inst_id
and bs = prior sid
)
;
/*
FUNCTION ROWID_CREATE RETURNS ROWID
Argument Name Type In/Out Default?
------------------------------ ----------------------- ------ --------
ROWID_TYPE NUMBER IN
OBJECT_NUMBER NUMBER IN
RELATIVE_FNO NUMBER IN
BLOCK_NUMBER NUMBER IN
ROW_NUMBER NUMBER IN
select dbms_rowid.rowid_create(1, 81574, 26, 286, 262) from dual;
*/

7
vdh/bloom_join2.sql Normal file
View File

@@ -0,0 +1,7 @@
-- skew2 and skew3 must be defined as PX
-- Demo: parallel hash join of kso.skew3 to kso.skew2, intended to show a
-- Bloom filter being used on the join. The leading "bloom join 2" text inside
-- the hint comment is descriptive only, not a valid hint.
-- NOTE(review): use_hash references aliases "skew"/"temp_skew", which do not
-- match the aliases a/b used in this query - the hint is likely ignored;
-- verify with the execution plan.
select /*+ bloom join 2 use_hash (skew temp_skew) */ a.col2, sum(a.col1)
from kso.skew3 a, kso.skew2 b
where a.pk_col = b.pk_col
and b.col1 = 1
group by a.col2
/

View File

@@ -0,0 +1,80 @@
-- calculates the sum of the ALLOCATED sizes for the given non partitioned tables
-- and their dependend indexes and lob segments
column mb format 9G999G999D99
column extents format 999G999D99
column blocks format 999G999G999D99
compute sum label total of mb on report
break on report
-- my_segments: the table plus the names of all its indexes, LOB segments and
-- LOB index segments (outer joins: a table may have none of these).
with my_segments
as
( select
--+ MATERIALIZE
tab.owner table_owner,
tab.table_name,
ind.owner index_owner,
ind.index_name,
lob.segment_name lob_segment,
lob.index_name lob_ind_segment
from
dba_tables tab
left outer join dba_indexes ind
on ( tab.owner = ind.table_owner
and tab.table_name = ind.table_name
)
left outer join dba_lobs lob
on ( tab.owner = lob.owner
and tab.table_name = lob.table_name
)
where
tab.owner = '&owner'
and tab.table_name = '&table_name'
)
-- Sum the allocated extents/blocks/bytes of every matching segment,
-- grouped by segment type (TABLE, INDEX, LOBSEGMENT, LOBINDEX, ...).
select
segment_type,
sum(extents) extents,
sum(blocks) blocks,
sum(bytes)/1024/1024 mb
from
dba_segments dseg
where
(owner,segment_name) in
( select
seg.table_owner,
seg.table_name
from
my_segments seg
)
or (owner,segment_name) in
( select
seg.index_owner,
seg.index_name
from
my_segments seg
)
or (owner, segment_name) in
( select
seg.table_owner,
seg.lob_segment
from
my_segments seg
)
or (owner, segment_name) in
( select
seg.table_owner,
seg.lob_ind_segment
from
my_segments seg
)
group by
segment_type
;
clear computes
clear breaks
undef owner
undef table_name

14
vdh/calibrate_io.sql Normal file
View File

@@ -0,0 +1,14 @@
SET SERVEROUTPUT ON
-- Run the Oracle I/O calibration and print the results.
-- &no_of_disks: number of physical disks backing the database;
-- maximum tolerated latency is fixed at 10 ms.
DECLARE
l_iops INTEGER; -- maximum sustainable random I/O requests per second
l_mbps INTEGER; -- maximum sustainable large-read throughput (MB/s)
l_latency INTEGER; -- measured latency (ms)
BEGIN
-- signature: CALIBRATE_IO(num_physical_disks, max_latency,
--                         max_iops OUT, max_mbps OUT, actual_latency OUT)
DBMS_RESOURCE_MANAGER.CALIBRATE_IO(&no_of_disks, 10, l_iops, l_mbps, l_latency);
DBMS_OUTPUT.PUT_LINE('max_iops = ' || l_iops);
DBMS_OUTPUT.PUT_LINE('latency = ' || l_latency);
DBMS_OUTPUT.PUT_LINE('max_mbps = ' || l_mbps);
END;
/

View File

@@ -0,0 +1,80 @@
-- Optimizer-statistics health check for one schema: counts table and index
-- statistics entries per lock status, last-analyzed day and object type,
-- excluding external tables and global temporary tables (which are not
-- analyzed the normal way). Subtotals per day, per lock status and overall.
column counted format 9G999G999G999
accept OWNER prompt 'Owner Name: '
break on report on stattype_locked skip 2 on last_analyzed_day skip 1
compute sum of counted on last_analyzed_day
compute sum of counted on stattype_locked
compute sum of counted on report
select
stattype_locked,
trunc(last_analyzed) last_analyzed_day,
object_type,
count(*) counted
from
( -- table statistics, minus external and temporary tables
select
object_type,
last_analyzed,
stattype_locked
from
dba_tab_statistics
where
owner = '&OWNER'
and table_name not in
( select
table_name
from
dba_external_tables
where
owner = '&OWNER'
union all
select
table_name
from
dba_tables
where
temporary = 'Y'
and owner = '&OWNER'
)
union all
-- index statistics, with the same exclusions applied via the base table
select
object_type,
last_analyzed,
stattype_locked
from
dba_ind_statistics
where
owner = '&OWNER'
and table_name not in
( select
table_name
from
dba_external_tables
where
owner = '&OWNER'
union all
select
table_name
from
dba_tables
where
temporary = 'Y'
and owner = '&OWNER'
)
)
group by
stattype_locked,
trunc(last_analyzed),
object_type
order by
stattype_locked,
last_analyzed_day,
object_type
;
clear breaks
clear computes
undef OWNER

406
vdh/cdc_healthcheck.sql Normal file
View File

@@ -0,0 +1,406 @@
REM
REM This script collects details related to CDC setup and activity.
REM
REM It is recommended to run with markup html ON (default is on) and generate an
REM HTML file for web viewing.
REM Please provide the output in HTML format when Oracle (support or development) requests
REM CDC configuration details.
REM
REM NOTE:
REM This main consideration of this note is to provide configuration details although
REM some performance detail is provided. The note should be used in conjunction with
REM the Streams Healthcheck -
REM <Note:273674.1> Streams Configuration Report and Health Check Script which also provides
REM detailed performance inforation relating to Capture and Apply processes.
REM
REM To convert output to a text file viewable with a text editor,
REM change the HTML ON to HTML OFF in the set markup command
REM Remember to set up a spool file to capture the output
REM
-- connect / as sysdba
set markup HTML ON entmap off
alter session set nls_date_format='HH24:Mi:SS MM/DD/YY';
set heading off
select 'CDC Configuration Check (V1.0.0) for '||global_name||' on Instance='||instance_name||' generated: '||sysdate o from global_name, v$instance;
set heading on timing off
set pages 9999
prompt Publishers: <a href="#Publishers"> Publishers </a> <a href="#PubPrivs"> Privileges </a>
prompt Change Sets: <a href="#ChangeSets"> Change Sets </a> <a href="#ChangeSources"> Change Sources </a> <a href="#ChangeTabs"> Change Tables </a>
prompt Subscribers: <a href="#ChangeSetSubs"> Change Sets </a> <a href="#ChangeTabs"> Change Tables </a> <a href="#ChangeSetTabSubs"> Views </a>
prompt Processes: <a href="#CapDistHotLog"> Capture </a> <a href="#ApplyProc"> Apply </a> <a href="#Propagation"> Propagation </a>
prompt Processes: <a href="#AddAutoLog"> Additional Autolog Details </a>
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="Database">Database Information</a> ++
COLUMN MIN_LOG FORMAT A7
COLUMN PK_LOG FORMAT A6
COLUMN UI_LOG FORMAT A6
COLUMN FK_LOG FORMAT A6
COLUMN ALL_LOG FORMAT A6
COLUMN FORCE_LOG FORMAT A10
COLUMN archive_change# format 999999999999999999
COLUMN archivelog_change# format 999999999999999999
COLUMN NAME HEADING 'Name'
COLUMN platform_name format a30 wrap
COLUMN current_scn format 99999999999999999
SELECT DBid,name,created,
SUPPLEMENTAL_LOG_DATA_MIN MIN_LOG,SUPPLEMENTAL_LOG_DATA_PK PK_LOG,
SUPPLEMENTAL_LOG_DATA_UI UI_LOG,
SUPPLEMENTAL_LOG_DATA_FK FK_LOG,
SUPPLEMENTAL_LOG_DATA_ALL ALL_LOG,
FORCE_LOGGING FORCE_LOG,
resetlogs_time,log_mode, archive_change#,
open_mode,database_role,archivelog_change# , current_scn, platform_id, platform_name from v$database;
prompt ============================================================================================
prompt
prompt ++ <a name="Parameters">Parameters</a> ++
column NAME format a30
column VALUE format a30
select NAME, VALUE from v$parameter where name in ('java_pool_size','compatible','parallel_max_servers','job_queue_processes',
'aq_tm_processes','processes','sessions','streams_pool_size','undo_retention','open_links','global_names','remote_login_passwordfile');
prompt ++ <a name="aq_tm_processes">AQ_TM_PROCESSES should indicate QMON AUTO TUNING IN FORCE</a> ++
column NAME format a20
column NULL? format a20
select inst_id, name, nvl(value,'AUTO TUNING IN OPERATION') "SHOULD INDICATE AUTO TUNING"
from gv$spparameter
where name = 'aq_tm_processes';
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="Publishers">Publishers</a> ++
COLUMN PUBLISHER HEADING 'Change Set Publishers' FORMAT A30
select distinct PUBLISHER from change_sets where PUBLISHER is not null;
COLUMN PUBLISHER HEADING 'Change Source Publishers' FORMAT A30
select distinct PUBLISHER from change_sources where PUBLISHER is not null;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="PubPrivs">Publishers Privileges</a> ++
prompt ++ <a name="PubRoles">--Roles</a> ++
COLUMN GRANTEE HEADING 'GRANTEE' FORMAT A30
COLUMN GRANTED_ROLE HEADING 'GRANTED_ROLE' FORMAT A30
COLUMN ADMIN_OPTION HEADING 'ADMIN_OPTION' FORMAT A3
COLUMN DEFAULT_ROLE HEADING 'DEFAULT_ROLE' FORMAT A3
-- Roles granted to every CDC publisher (owner of a change set or change source)
select GRANTEE ,GRANTED_ROLE,ADMIN_OPTION,DEFAULT_ROLE from dba_role_privs
where GRANTEE in (select distinct PUBLISHER from change_sets where PUBLISHER is not null)
or GRANTEE in (select distinct PUBLISHER from change_sources where PUBLISHER is not null)
order by GRANTEE;
prompt
prompt ++ <a name="SysPrivs">--System Privileges</a> ++
prompt
COLUMN GRANTEE HEADING 'GRANTEE' FORMAT A30
COLUMN PRIVILEGE HEADING 'PRIVILEGE' FORMAT A40
COLUMN ADMIN_OPTION HEADING 'ADMIN_OPTION' FORMAT A3
-- System privileges held directly by the publishers
select GRANTEE,PRIVILEGE,ADMIN_OPTION from dba_sys_privs
where GRANTEE in (select distinct PUBLISHER from change_sets where PUBLISHER is not null)
or GRANTEE in (select distinct PUBLISHER from change_sources where PUBLISHER is not null)
order by GRANTEE;
prompt
prompt ++ <a name="TabPrivs">--Table Privileges</a> ++
COLUMN GRANTEE format a20
COLUMN TABLE_NAME format a40
COLUMN PRIVILEGE format a10
-- Object privileges held by the publishers
select GRANTEE, OWNER||'.'||TABLE_NAME "TABLE_NAME", PRIVILEGE
from dba_tab_privs
where GRANTEE in (select distinct PUBLISHER from change_sets where PUBLISHER is not null)
or GRANTEE in (select distinct PUBLISHER from change_sources where PUBLISHER is not null)
order by GRANTEE, TABLE_NAME;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="SuppLogging">Tables with Supplemental Logging</a> ++
column OWNER format a30
column TABLE_NAME format a30 wrap
column TABLE Format a25
select owner||'.'||table_name "TABLE", LOG_GROUP_NAME, LOG_GROUP_TYPE from DBA_LOG_GROUPS;
prompt
prompt ++ <a name="SuppLogCols">Supplemental logging columns</a> ++
COLUMN LOG_GROUP_NAME format a25
COLUMN COLUMN_NAME format a25
select owner||'.'||table_name "Table", LOG_GROUP_NAME, COLUMN_NAME from DBA_LOG_GROUP_COLUMNS;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="ChangeSets">Change Sets</a> ++
prompt
column PUBLISHER format a20
column SET_NAME format a20
column CHANGE_SOURCE_NAME format a20
select PUBLISHER, SET_NAME, CHANGE_SOURCE_NAME, CREATED from change_sets;
prompt
prompt ++ <a name="ChangeSets">Change Set Status</a> ++
prompt
column c HEADING 'Capture|Enabled' format a7
column LOWEST_SCN format 999999999999999999
select PUBLISHER, SET_NAME, CAPTURE_ENABLED c, PURGING, BEGIN_DATE, END_DATE, LOWEST_SCN, STOP_ON_DDL from change_sets;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="ChangeSources">Change Sources</a> ++
prompt
column SOURCE_NAME format a15
column SOURCE_DESCRIPTION format a30
column SOURCE_DATABASE format a40
select SOURCE_NAME, SOURCE_DESCRIPTION, CREATED, SOURCE_TYPE, SOURCE_DATABASE, SOURCE_ENABLED "Enabled"
from change_sources;
prompt ============================================================================================
prompt
prompt ++ <a name="TabPrep">Tables Prepared for Capture</a> ++
prompt
COLUMN table_owner format a30 HEADING 'Table|Owner'
COLUMN table_name format a30 HEADING 'Table|Name'
COLUMN timestamp heading 'Timestamp'
COLUMN supplemental_log_data_pk HEADING 'PK|Logging'
COLUMN supplemental_log_data_ui HEADING 'UI|Logging'
COLUMN supplemental_log_data_fk HEADING 'FK|Logging'
COLUMN supplemental_log_data_all HEADING 'All|Logging'
select * from dba_capture_prepared_tables order by table_owner,table_name;
prompt ++ <a name="SchemaPrep">Schemas Prepared for Capture</a> ++
select * from dba_capture_prepared_schemas order by schema_name;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="CapDistHotLog">Capture / Distributed HotLog Case</a> ++
select s.SOURCE_NAME, c.CAPTURE_NAME, c.QUEUE_NAME, c.STATUS
from change_sources s, dba_capture c
where s.capture_name=c.capture_name
and s.capture_queue_name=c.queue_name
and s.source_database=c.source_database
and s.publisher=c.queue_owner;
prompt
prompt ++ <a name="CapProcDistHotLog">Capture process current state</a> ++
column CAPTURE_NAME format a25
column SET_NAME format a20
column STATE format a60
column TOTAL_MESSAGES_CAPTURED HEADING 'Captured|MSGs'
column TOTAL_MESSAGES_ENQUEUED HEADING 'Enqueued|MSGs'
select s.PUBLISHER, s.SOURCE_NAME, c.capture_name, c.state
from v$streams_capture c, change_sources s
where c.capture_name = s.capture_name;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="HotLog">Capture / other configurations</a> ++
Column PUBLISHER format a10
column change_set format a20
column CAPTURE_NAME format a25
column queue_name format a25
column ERROR_MESSAGE format a30
select PUBLISHER, s.SET_NAME change_set, c.CAPTURE_NAME, c.QUEUE_NAME, STATUS, ERROR_MESSAGE
from dba_capture c, change_sets s
where c.capture_name=s.capture_name
and c.queue_name = s.queue_name
and c.queue_owner = s.publisher;
prompt
prompt ++ <a name="CapHotLog">Capture process current state</a> ++
column CAPTURE_NAME format a25
column SET_NAME format a20
column STATE format a60
column TOTAL_MESSAGES_CAPTURED HEADING 'Captured|MSGs'
column TOTAL_MESSAGES_ENQUEUED HEADING 'Enqueued|MSGs'
select s.PUBLISHER, s.SET_NAME, c.capture_name, c.state
from v$streams_capture c, change_sets s
where c.capture_name = s.capture_name;
prompt ============================================================================================
prompt
prompt ++ <a name="BuffPubs">Buffered Publishers</a> ++
select * from gv$buffered_publishers;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="ApplyProc">Apply process re Change Sets</a> ++
prompt
Column PUBLISHER format a15
column change_set format a20
column apply_name format a25
column queue_name format a25
column ERROR_MESSAGE format a30
select PUBLISHER, s.SET_NAME change_set, a.APPLY_NAME, a.QUEUE_NAME, STATUS, ERROR_MESSAGE
from dba_apply a, change_sets s
where a.apply_name=s.apply_name
and a.queue_name = s.queue_name
and a.queue_owner = s.publisher;
prompt
prompt ++ <a name="BuffSubs">Buffered Subscribers</a> ++
select * from gv$buffered_subscribers;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="BuffQs">Buffered Queues</a> ++
column QUEUE format a40
select QUEUE_SCHEMA||'.'||QUEUE_NAME Queue, NUM_MSGS-SPILL_MSGS "Memory MSGs", SPILL_MSGS "Spilled", CNUM_MSGS Cummulative
from v$buffered_queues;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="Propagation">Propagation</a> ++
column "Source Queue" format a30
column "Dest Queue" format a35
column "Destination Name" format a35
select PROPAGATION_NAME, DESTINATION_DBLINK "Destination Name", SOURCE_QUEUE_OWNER||'.'||SOURCE_QUEUE_NAME "Source Queue",
DESTINATION_QUEUE_OWNER||'.'||DESTINATION_QUEUE_NAME "Dest Queue",
STATUS, ERROR_MESSAGE
from dba_propagation;
prompt ++ <a name="QueueSched">Queue Schedules</a> ++
column SOURCE format a30
column DESTINATION format a65
column LAST_ERROR_MSG format a30
select SCHEMA||'.'||QNAME "SOURCE", DESTINATION, SCHEDULE_DISABLED, TOTAL_NUMBER, LAST_ERROR_MSG, LAST_ERROR_DATE
from dba_queue_schedules;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="ChangeTabs">Change Tables</a> ++
column "Change Set" format a20
column "Change Table" format a25
column "Source Table" format a25
column CREATED_SCN format 999999999999999999
select CHANGE_SET_NAME "Change Set", CHANGE_TABLE_SCHEMA||'.'||CHANGE_TABLE_NAME "Change Table",
SOURCE_SCHEMA_NAME||'.'||SOURCE_TABLE_NAME "Source Table",
CAPTURED_VALUES, CREATED, CREATED_SCN
from change_tables;
prompt
prompt ++ <a name="ChangeColsPerTab">Change Columns For Each Change Table</a> ++
column "Change Table" format a30
column COLUMN_NAME format a20
column DATA_TYPE format a10
select CHANGE_TABLE_SCHEMA||'.'||CHANGE_TABLE_NAME "Change Table", PUB_ID, COLUMN_NAME, DATA_TYPE
from DBA_PUBLISHED_COLUMNS
order by "Change Table", PUB_ID, COLUMN_NAME;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="Users">Users Eligible to be Subscribers for Change Tables</a> ++
column "User" format a15
column "Change Set" format a20
column "Change Table" format a20
column "Source Table" format a20
-- Users holding SELECT on a change table are eligible to subscribe to it.
-- (Typo fix: the column alias and matching COLUMN command read "Channge Table".)
select t.GRANTEE "User", CHANGE_SET_NAME "Change Set", t.OWNER||'.'||t.TABLE_NAME "Change Table", s.SOURCE_SCHEMA_NAME||'.'||s.SOURCE_TABLE_NAME "Source Table"
from dba_tab_privs t, dba_source_tables s, change_tables c
where t.PRIVILEGE ='SELECT'
and t.OWNER=c.CHANGE_TABLE_SCHEMA
and t.TABLE_NAME=c.CHANGE_TABLE_NAME
and c.SOURCE_SCHEMA_NAME=s.SOURCE_SCHEMA_NAME
and c.SOURCE_TABLE_NAME=s.SOURCE_TABLE_NAME;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="ChangeSetSubs">Change Set Subscribers</a> ++
column "Subscriber" format a15
column SUBSCRIPTION_NAME format a20
column "Change Set" format a20
column LAST_EXTENDED heading "Extended"
column LAST_PURGED heading "Purged"
select USERNAME "Subscriber", SET_NAME "Change Set", SUBSCRIPTION_NAME, CREATED, LAST_EXTENDED, LAST_PURGED
FROM DBA_SUBSCRIPTIONS;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="ChangeSetTabSubs">Change Set Tables Subscriptions / Subscribers Views</a> ++
column VIEW_NAME format a15
column "Change Table" format a20
select s.SUBSCRIPTION_NAME, s.VIEW_NAME,
c.CHANGE_TABLE_SCHEMA||'.'||c.CHANGE_TABLE_NAME "Change Table",
s.CHANGE_SET_NAME "Change Set"
from DBA_SUBSCRIBED_TABLES s, CHANGE_TABLES c
where c.SOURCE_SCHEMA_NAME = s.SOURCE_SCHEMA_NAME
and c.SOURCE_TABLE_NAME = s.SOURCE_TABLE_NAME
and c.CHANGE_SET_NAME = s.CHANGE_SET_NAME;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="ChangeTabColSubs">Change Table / Columns Subscriptions</a> ++
column CHANGE_SET_NAME format a20
column COLUMN_NAME format a15
column DATA_TYPE format a10
select sc.SUBSCRIPTION_NAME, s.VIEW_NAME, s.CHANGE_SET_NAME, c.COLUMN_NAME, c.DATA_TYPE, c.PUB_ID
from DBA_SUBSCRIBED_COLUMNS sc, DBA_SUBSCRIBED_TABLES s, DBA_PUBLISHED_COLUMNS c
where sc.SOURCE_SCHEMA_NAME = s.SOURCE_SCHEMA_NAME
and sc.SOURCE_TABLE_NAME = s.SOURCE_TABLE_NAME
and sc.SOURCE_TABLE_NAME = c.SOURCE_TABLE_NAME
and sc.SOURCE_SCHEMA_NAME = c.SOURCE_SCHEMA_NAME
and s.CHANGE_SET_NAME = c.CHANGE_SET_NAME
and sc.COLUMN_NAME = c.COLUMN_NAME
order by sc.SUBSCRIPTION_NAME, s.VIEW_NAME, s.CHANGE_SET_NAME, c.COLUMN_NAME;
prompt
prompt ============================================================================================
prompt
prompt ++ <a name="AddAutoLog">Additional Information relevant to AutoLog</a> ++
prompt +++ <a name="AddAutoLogParms">Autolog Parameters</a> ++
column NAME format a30
column VALUE format a30
select NAME, VALUE from v$parameter where name like 'log_archive%';
prompt
prompt ++ <a name="Logfiles">Logfiles</a> ++
select THREAD#, GROUP#, BYTES/1024/1024 from V$LOG;
prompt
prompt ++ <a name="Standby Logs">Standbylogs</a> ++
SELECT GROUP#, THREAD#, SEQUENCE#, BYTES/1024/1024, ARCHIVED, STATUS FROM V$STANDBY_LOG;
set timing off
set markup html off
clear col
clear break
spool
prompt End Of Script
spool off

View File

@@ -0,0 +1,16 @@
-- Generates (via DBMS_OUTPUT) one ALTER TABLE/INDEX statement per segment
-- larger than 100 MB, setting PCTINCREASE 0.  Requires "set serveroutput on";
-- the emitted statements are meant to be spooled and run separately.
BEGIN
for big_seg in
( select owner, segment_name, segment_type
from dba_segments
where bytes > 100 *1024*1024 -- 100 MB threshold
and segment_type in ('INDEX', 'TABLE') -- skips partitions, LOBs, clusters etc.
)
loop
dbms_output.put_line( 'alter ' || big_seg.segment_type || ' ' ||
big_seg.owner || '.' || big_seg.segment_name ||
' storage (pctincrease 0);');
end loop;
END;

View File

@@ -0,0 +1,77 @@
-- check for non deferrable unique/primary constraints that are supported by non unique indexes
-- because of the wide output it is best to spool it
set linesize 200
column owner format a30
-- Query 1: the offending constraints themselves -- U/P constraints that are
-- NOT DEFERRABLE yet enforced through a NON-unique index.
break on owner skip 1 on table_name
select con.owner, con.table_name, con.constraint_name, con.constraint_type, index_owner, con.index_name
from dba_indexes ind,
dba_constraints con
where ind.owner = con.index_owner
and ind.index_name = con.index_name
and ind.uniqueness != 'UNIQUE'
and con.constraint_type in ('U', 'P')
and con.deferrable = 'NOT DEFERRABLE'
order by con.owner, con.table_name, con.constraint_name;
clear breaks
column column_name format a30
column column_position format 99 heading CP
column uniqueness format a1 heading U
column index_type format a10
-- Query 2: full column (and expression) layout of each index found above.
-- Outer joins to dba_ind_expressions so function-based index columns show
-- their expression text.
break on owner skip 1 on table_name on index_name on index_type on uniqueness on status
select ind.owner, ind.table_name, ind.index_name, ind.index_type, decode(ind.uniqueness,'UNIQUE', 'Y', 'N') uniqueness,
ind.status, inc.column_name, inc.column_position, ine.column_expression
from dba_indexes ind, dba_ind_columns inc, dba_ind_expressions ine
where ind.owner = inc.index_owner
and ind.index_name = inc.index_name
and inc.index_owner = ine.index_owner(+)
and inc.index_name = ine.index_name(+)
and inc.column_position = ine.column_position(+)
and (ind.owner, ind.index_name) in
( select con.index_owner, con.index_name
from dba_indexes ind,
dba_constraints con
where ind.owner = con.index_owner
and ind.index_name = con.index_name
and ind.uniqueness != 'UNIQUE'
and con.constraint_type in ('U', 'P')
and con.deferrable = 'NOT DEFERRABLE'
)
order by ind.owner, ind.table_name, ind.index_name, inc.column_position;
clear breaks
set long 30
column constraint_name format a30
column constraint_type format a2 heading CT
column column_name format a30
column position format 99 heading CP
-- Query 3: column list of each offending constraint, for comparison against
-- the index columns printed by query 2.
break on owner skip 1 on table_name on constraint_name on constraint_type on status
select con.owner, con.table_name, con.constraint_name, con.constraint_type, con.status, col.column_name, col.position
from dba_constraints con,
dba_cons_columns col,
dba_indexes ind
where col.owner = con.owner
and col.constraint_name = con.constraint_name
and ind.owner = con.index_owner
and ind.index_name = con.index_name
and ind.uniqueness != 'UNIQUE'
and con.constraint_type in ('U', 'P')
and con.deferrable = 'NOT DEFERRABLE'
order by con.owner, con.table_name, con.constraint_name, col.position;
clear breaks

7
vdh/check_px.sql Normal file
View File

@@ -0,0 +1,7 @@
-- Show the "In Use" statistics of V$PX_PROCESS_SYSSTAT, i.e. how many
-- parallel execution servers are currently busy.
column statistic format a50
column value for 999,999
select *
from V$PX_PROCESS_SYSSTAT
where statistic like '%In Use%';

View File

@@ -0,0 +1,3 @@
# Oracle Net client-side tracing (sqlnet.ora).
# Typo fix: "tracle_level_client" is not a valid parameter and was silently
# ignored, so no trace level was ever set.
trace_level_client = admin
trace_unique_client = on
trace_timestamp_client = on

View File

@@ -0,0 +1,5 @@
# Oracle Net client-side tracing (sqlnet.ora) with explicit file name/location.
# Typo fix: "tracle_level_client" is not a valid parameter and was silently
# ignored, so no trace level was ever set.
trace_level_client = admin
trace_unique_client = on
trace_timestamp_client = on
trace_file_client = cli
trace_directory_client = E:\oracle\OraHome\network\trace

1
vdh/cnt_proc_per_user.sh Normal file
View File

@@ -0,0 +1 @@
ps -ef | awk '{if (NR > 1) {users[$1]++}} END {for (user in users) { printf "%-20s %d\n", user, users[user]}}'

86
vdh/cols_as_rows.sql Normal file
View File

@@ -0,0 +1,86 @@
-- Object/collection types returned by cols_as_rows: one (row number,
-- column name, column value) tuple per column of each fetched row.
create or replace type obj_cols_as_rows as object
( rnum number,
cname varchar2(30),
val varchar2(4000)
)
/
create or replace type tab_cols_as_rows
as table of obj_cols_as_rows
/
-- Pivot helper: executes an arbitrary query with invoker rights and pipes
-- every column of every row as its own (rnum, cname, val) row.  All values
-- are fetched as VARCHAR2(4000) regardless of their native type.
create or replace
function cols_as_rows
( p_query in varchar2
) RETURN tab_cols_as_rows
AUTHID CURRENT_USER
PIPELINED
AS
l_theCursor integer default dbms_sql.open_cursor;
l_columnValue varchar2(4000);
l_status integer;
l_colCnt number default 0;
l_descTbl dbms_sql.desc_tab;
l_rnum number := 1;
BEGIN
dbms_sql.parse
( c => l_theCursor,
statement => p_query,
language_flag => dbms_sql.native
);
dbms_sql.describe_columns
( c => l_theCursor,
col_cnt => l_colCnt,
desc_t => l_descTbl
);
-- Define every column as VARCHAR2(4000), whatever its native type.
for i in 1 .. l_colCnt loop
dbms_sql.define_column
( c => l_theCursor,
position => i,
column => l_columnValue,
column_size => 4000
);
end loop;
l_status := dbms_sql.execute
( c => l_theCursor
);
while ( dbms_sql.fetch_rows
( c => l_theCursor
) > 0
)
loop
for i in 1 .. l_colCnt
loop
dbms_sql.column_value
( c => l_theCursor,
position => i,
value => l_columnValue
);
pipe row ( obj_cols_as_rows
( l_rnum,
l_descTbl(i).col_name,
l_columnValue
)
);
end loop;
l_rnum := l_rnum + 1;
end loop;
dbms_sql.close_cursor
( c => l_theCursor
);
return;
exception
-- bug fix: the original leaked the DBMS_SQL cursor whenever PARSE (or any
-- later call) raised, e.g. on a malformed query; repeated failures could
-- exhaust OPEN_CURSORS.  Close the cursor, then re-raise.  This also cleans
-- up on NO_DATA_NEEDED when the caller stops fetching early.
when others then
if dbms_sql.is_open( c => l_theCursor ) then
dbms_sql.close_cursor( c => l_theCursor );
end if;
raise;
end cols_as_rows;
/

17
vdh/comp_ratio.sql Normal file
View File

@@ -0,0 +1,17 @@
-- Compression-ratio estimate: compares a user-supplied original size (in MB)
-- against the current on-disk size of the matching segments.
-- Substitution variables: &original_size, &owner, &table_name, &type
-- (empty owner/name/type match everything via NVL).
set lines 155
compute sum of totalsize_megs on report
break on report
col owner for a10
col segment_name for a20
col segment_type for a10
col totalsize_megs for 999,999.9
col compression_ratio for 999.9
select owner, segment_name, segment_type type,
sum(bytes/1024/1024) as totalsize_megs,
&original_size/sum(bytes/1024/1024) as compression_ratio
from dba_segments
where owner like nvl('&owner',owner)
and segment_name like nvl('&table_name',segment_name)
and segment_type like nvl('&type',segment_type)
-- NOTE(review): tablespace_name is grouped on but not selected, so a
-- partitioned segment spread over several tablespaces shows as repeated
-- rows -- confirm intended.
group by owner, segment_name, tablespace_name, segment_type
order by compression_ratio;

781
vdh/compare_schema.sql Normal file
View File

@@ -0,0 +1,781 @@
/*
This script compares the object definitions in the current schema
to that of a remote schema.
The remote schema is defined using a database link.
THE SCRIPT COMPARES THE FOLLOWING:
- Existence of tables
- Existence of columns
- Column definitions
- Existence of indexes
- Index definitions (column usage)
- Existence of constraints
- Constraint definitions (table, type and reference)
- Constraint column usage (for unique, primary key and foreign keys)
- Check constraint definitions
- Existence of triggers
- Definition of triggers
- Existence of procedure/packages/functions
- Definition of procedures/packages/functions
(Ie. the script does not do a complete check, it does not for example
check any grants, synonyms, clusters or storage definitions).
The script drops and creates a few temporary objects prefixed with
the first 3 letter combination (AAA - ZZZ) that does not conflict with any
existing objects.
If you find ways of improving this script or have any comments and/or
problems, please send a mail to the author.
This script has been tested on Oracle 7.3.
*/
undef prex
undef prefx
undef a
undef thisuser
undef b
undef REMOTESCHEMA
undef REMOTEPASSW
undef connstring
undef c
undef todaysdate
variable prefx varchar2(3)
declare
i number ;
j number ;
k number ;
cnt number;
begin
for i in ascii('A') .. ascii('Z') loop
for j in ascii('A') .. ascii('Z') loop
for k in ascii('A') .. ascii('Z') loop
select count(*) into cnt from user_objects where object_name like
chr(i)||chr(j)||chr(k)||'%';
if cnt = 0 then
:prefx := chr(i)||chr(j)||chr(k);
return;
end if;
end loop;
end loop;
end loop;
end;
/
column a new_val prex
set verify off
set linesize 132
set feedback off
select :prefx a from dual;
column b new_val thisuser
select user b from dual;
column c new_val todaysdate
select to_char(sysdate,'DD-MON-YYYY HH24:MI') c from dual;
accept REMOTESCHEMA char prompt 'Enter remote username:'
accept REMOTEPASSW char prompt 'Enter remote password:' hide
accept connstring char prompt 'Enter remote connectstring:'
spool dbdiff
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT SCHEMA DEFINITION DIFFERENCES &todaysdate
PROMPT
PROMPT this schema: &thisuser
PROMPT remote schema: &remoteschema.@&connstring
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT
PROMPT
create database link &prex.lnk connect to &REMOTESCHEMA identified
by &REMOTEPASSW using '&CONNSTRING';
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT TABLES MISSING IN THIS SCHEMA:
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
create table &prex.common_tables as
select table_name from user_TAbles@&prex.lnk
intersect
select table_name from user_tables;
select table_name from user_TAbles@&prex.lnk
minus
select table_name from &prex.common_tables;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT TABLES MISSING IN REMOTE SCHEMA:
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select table_name from user_TAbles where table_name not like '&prex.%'
minus
select table_name from user_tables@&prex.lnk;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT COLUMNS MISSING IN THIS SCHEMA FOR COMMON TABLES
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select table_name,column_name from user_tab_columns@&prex.lnk
where table_name in
(select table_name from &prex.common_tables)
minus
select table_name,column_name from user_tab_columns
where table_name in
(select table_name from &prex.common_tables);
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT COLUMNS MISSING IN REMOTE SCHEMA FOR COMMON TABLES
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select table_name,column_name from user_tab_columns
where table_name in
(select table_name from &prex.common_tables)
minus
select table_name,column_name from user_tab_columns@&prex.lnk
where table_name in
(select table_name from &prex.common_tables);
create table &prex.diff_cols1
( TABLE_NAME VARCHAR2(30),
COLUMN_NAME VARCHAR2(30),
DATA_TYPE VARCHAR2(9),
DATA_LENGTH NUMBER,
DATA_PRECISION NUMBER,
DATA_SCALE NUMBER,
NULLABLE VARCHAR2(1),
COLUMN_ID NUMBER,
DEFAULT_LENGTH NUMBER,
DATA_DEFAULT varchar2(2000));
create table &prex.diff_cols2
( TABLE_NAME VARCHAR2(30),
COLUMN_NAME VARCHAR2(30),
DATA_TYPE VARCHAR2(9),
DATA_LENGTH NUMBER,
DATA_PRECISION NUMBER,
DATA_SCALE NUMBER,
NULLABLE VARCHAR2(1),
COLUMN_ID NUMBER,
DEFAULT_LENGTH NUMBER,
DATA_DEFAULT varchar2(2000));
declare
cursor c1 is
select
l.TABLE_NAME ,
l.COLUMN_NAME,
l.DATA_TYPE ,
l.DATA_LENGTH,
l.DATA_PRECISION ,
l.DATA_SCALE ,
l.NULLABLE,
l.COLUMN_ID ,
l.DEFAULT_LENGTH ,
l.DATA_DEFAULT
from user_tab_columns l,&prex.common_tables c
where c.table_name=l.table_name ;
TYPE rec is record (
TABLE_NAME VARCHAR2(30),
COLUMN_NAME VARCHAR2(30),
DATA_TYPE VARCHAR2(9),
DATA_LENGTH NUMBER,
DATA_PRECISION NUMBER,
DATA_SCALE NUMBER,
NULLABLE VARCHAR2(1),
COLUMN_ID NUMBER,
DEFAULT_LENGTH NUMBER,
DATA_DEFAULT varchar2(2000)
);
c rec;
begin
open c1;
loop
fetch c1 into c;
exit when c1%NOTFOUND;
insert into &prex.diff_cols1 values
(c.table_name,c.column_name,c.data_type,c.data_length,
c.DATA_PRECISION, c.DATA_SCALE, c.NULLABLE, c.COLUMN_ID,
c.DEFAULT_LENGTH, c.DATA_DEFAULT);
end loop;
end;
/
declare
cursor c1 is
select
l.TABLE_NAME ,
l.COLUMN_NAME,
l.DATA_TYPE ,
l.DATA_LENGTH,
l.DATA_PRECISION ,
l.DATA_SCALE ,
l.NULLABLE,
l.COLUMN_ID ,
l.DEFAULT_LENGTH ,
l.DATA_DEFAULT
from user_tab_columns@&prex.lnk l,&prex.common_tables c
where c.table_name=l.table_name ;
TYPE rec is record (
TABLE_NAME VARCHAR2(30),
COLUMN_NAME VARCHAR2(30),
DATA_TYPE VARCHAR2(9),
DATA_LENGTH NUMBER,
DATA_PRECISION NUMBER,
DATA_SCALE NUMBER,
NULLABLE VARCHAR2(1),
COLUMN_ID NUMBER,
DEFAULT_LENGTH NUMBER,
DATA_DEFAULT varchar2(2000)
);
c rec;
begin
open c1;
loop
fetch c1 into c;
exit when c1%NOTFOUND;
insert into &prex.diff_cols2 values
(c.table_name,c.column_name,c.data_type,c.data_length,
c.DATA_PRECISION, c.DATA_SCALE, c.NULLABLE, c.COLUMN_ID,
c.DEFAULT_LENGTH, c.DATA_DEFAULT);
end loop;
end;
/
column table_name format a20
column column_name format a20
column param format a15
column local_value format a20
column remote_value format a20
set arraysize 1
set maxdata 32000
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT DIFFERENCE IN COLUMN-DEFS
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select l.table_name,l.column_name,'DATA_DEFAULT' param ,
l.DATA_DEFAULT local_value, r.DATA_DEFAULT remote_value
from &prex.diff_cols1 l, &prex.diff_cols2 r
where l.table_name=r.table_name and
l.column_name=r.column_name and l.DATA_DEFAULT != r.DATA_DEFAULT
union
select l.table_name,l.column_name,'DATA_TYPE',l.data_type,r.data_type
from &prex.diff_cols1 l, &prex.diff_cols2 r
where l.table_name=r.table_name and
l.column_name=r.column_name and l.data_type != r.data_type
union
select l.table_name,l.column_name,'DATA_LENGTH',to_char(l.data_length),
to_char(r.data_length)
from &prex.diff_cols1 l, &prex.diff_cols2 r
where l.table_name=r.table_name and
l.column_name=r.column_name and l.data_length != r.data_length
union
select l.table_name,l.column_name,'DATA_PRECISION',
to_char(l.DATA_PRECISION),to_char(r.DATA_PRECISION)
from &prex.diff_cols1 l, &prex.diff_cols2 r
where l.table_name=r.table_name and
l.column_name=r.column_name and l.DATA_PRECISION != r.DATA_PRECISION
union
select l.table_name,l.column_name,'DATA_SCALE',to_char(l.DATA_SCALE),
to_char(r.DATA_SCALE)
from &prex.diff_cols1 l, &prex.diff_cols2 r
where l.table_name=r.table_name and
l.column_name=r.column_name and l.DATA_SCALE != r.DATA_SCALE
union
select l.table_name,l.column_name,'NULLABLE',l.NULLABLE,r.NULLABLE
from &prex.diff_cols1 l, &prex.diff_cols2 r
where l.table_name=r.table_name and
l.column_name=r.column_name and l.NULLABLE != r.NULLABLE
union
select l.table_name,l.column_name,'COLUMN_ID',to_char(l.COLUMN_ID),
to_char(r.COLUMN_ID)
from &prex.diff_cols1 l, &prex.diff_cols2 r
where l.table_name=r.table_name and
l.column_name=r.column_name and l.COLUMN_ID != r.COLUMN_ID
union
select l.table_name,l.column_name,'DEFAULT_LENGTH',to_char(l.DEFAULT_LENGTH),
to_char(r.DEFAULT_LENGTH)
from &prex.diff_cols1 l, &prex.diff_cols2 r
where l.table_name=r.table_name and
l.column_name=r.column_name and l.DEFAULT_LENGTH != r.DEFAULT_LENGTH
order by 1,2
/
create table &prex.common_indexes as
select table_name, index_name from user_indexes@&prex.lnk
where table_name in (select table_name from &prex.common_tables)
intersect
select table_name, INdex_name from user_indexes
where table_name in (select table_name from &prex.common_tables);
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT INDEXES MISSING IN THIS SCHEMA FOR COMMON TABLES
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select table_name, index_name from user_indexes@&prex.lnk
where table_name in (select table_name from &prex.common_tables)
minus
select table_name, index_name from &prex.common_indexes;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT INDEXES MISSING IN REMOTE SCHEMA FOR COMMON TABLES
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select table_name, index_name from user_indexes
where table_name in (select table_name from &prex.common_tables)
minus
select table_name, index_name from &prex.common_indexes;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT COMMON INDEXES WITH DIFFERENT UNIQUENESS
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select a.table_name, a.index_name, a.uniqueness local, b.uniqueness remote
from user_indexes a,
user_indexes@&prex.lnk b
where a.index_name = b.index_name
and a.uniqueness != b.uniqueness
and (a.table_name, a.index_name) in
(select table_name, index_name from &prex.common_indexes);
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT INDEX COLUMNS MISSING IN THIS SCHEMA FOR COMMON INDEXES
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select index_name, column_name from user_ind_columns@&prex.lnk
where (table_name,index_name) in
(select table_name,index_name from &prex.common_indexes)
minus
select index_name, column_name from user_ind_columns
where (table_name,index_name) in
(select table_name,index_name from &prex.common_indexes);
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT INDEX COLUMNS MISSING IN REMOTE SCHEMA FOR COMMON INDEXES
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select index_name, column_name from user_ind_columns
where (table_name,index_name) in
(select table_name,index_name from &prex.common_indexes)
minus
select index_name, column_name from user_ind_columns@&prex.lnk
where (table_name,index_name) in
(select table_name,index_name from &prex.common_indexes);
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT INDEX COLUMNS POSITIONED DIFFERENTLY FOR COMMON INDEXES
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select a.index_name, a.column_name, a.column_position local,
b.column_position remote
from user_ind_columns a,
user_ind_columns@&prex.lnk b
where (a.table_name,a.index_name) in
(select table_name,index_name from &prex.common_indexes)
and b.index_name = a.index_name
and b.table_name = a.table_name
and a.column_name = b.column_name
and a.column_position != b.column_position;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT CONSTRAINTS MISSING IN THIS SCHEMA FOR COMMON TABLES
PROMPT (WORKS ONLY FOR CONSTRAINT WITH NON SYSTEM GENERATED NAMES)
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select table_name,constraint_name from user_constraints@&prex.lnk
where constraint_name not like 'SYS%' and table_name in
(select table_name from &prex.common_tables)
minus
select table_name,constraint_name from user_constraints
where constraint_name not like 'SYS%' and table_name in
(select table_name from &prex.common_tables);
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT CONSTRAINTS MISSING IN REMOTE SCHEMA FOR COMMON TABLES
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select table_name,constraint_name from user_constraints
where constraint_name not like 'SYS%' and table_name in
(select table_name from &prex.common_tables)
minus
select table_name,constraint_name from user_constraints@&prex.lnk
where constraint_name not like 'SYS%' and table_name in
(select table_name from &prex.common_tables);
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT COMMON CONSTRAINTS, TYPE MISMATCH
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select a.constraint_name,a.constraint_type local_type,
b.constraint_type remote_type
from user_constraints a, user_constraints@&prex.lnk b where
a.table_name = b.table_name and
a.constraint_name=b.constraint_name and
a.constraint_type !=b.constraint_type;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT COMMON CONSTRAINTS, TABLE MISMATCH
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select a.constraint_name,a.table_name,b.table_name from
user_constraints a, user_constraints@&prex.lnk b,
(select z.constraint_name from
(select constraint_name, table_name from useR_constraints union
select constraint_name, table_name from user_constraints@&prex.lnk) z
group by constraint_name having count(*) >1) q
where a.constraint_name = q.constraint_name and
b.constraint_name=q.constraint_name
and a.table_name != b.table_name;
create table &prex.comcons as
select constraint_name, constraint_type, table_name
from useR_constraints
intersect
select constraint_name, constraint_type, table_name
from user_constraints@&prex.lnk;
delete from &prex.comcons where constraint_name in
(select constraint_name from &prex.comcons
group by constraint_name having count(*) > 1);
delete from &prex.comcons where constraint_name like 'SYS%';
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT DIFFERENCES IN COLUMN USAGE FOR CONSTRAINT DEFS
PROMPT (Unique key, Primary Key, Foreign key)
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
declare
-- One row per (constraint, position, column) that exists on one side only:
-- the symmetric difference between local and remote constraint columns,
-- restricted to the common non-system-named constraints in &prex.comcons.
cursor c1 is
select a.constraint_name,a.position,a.column_name,b.constraint_type
from user_cons_columns a, &prex.comcons b
where a.constraint_name=b.constraint_name
union
select a.constraint_name,a.position,a.column_name,b.constraint_type
from user_cons_columns@&prex.lnk a, &prex.comcons b
where a.constraint_name=b.constraint_name
minus
(select a.constraint_name,a.position,a.column_name,b.constraint_type
from user_cons_columns a, &prex.comcons b
where a.constraint_name=b.constraint_name
intersect
select a.constraint_name,a.position,a.column_name,b.constraint_type
from user_cons_columns@&prex.lnk a, &prex.comcons b
where a.constraint_name=b.constraint_name
);
begin
-- NOTE(review): a constraint differing in several columns is reported once
-- per differing column -- confirm acceptable for this report.
for c in c1 loop
dbms_output.put_line('COLUMN USAGE DIFFERENCE FOR '||c.constraint_type||
' CONSTRAINT '||c.constraint_name);
dbms_output.put_line('. Local columns:');
for c2 in (select column_name col
from user_cons_columns
where constraint_name=c.constraint_name order by position)
loop
dbms_output.put_line('. '||c2.col);
end loop;
dbms_output.put_line('. Remote columns:');
-- bug fix: the remote cursor lacked ORDER BY POSITION, so the remote list
-- printed in arbitrary order and could not be compared against the local
-- list line by line.  (Also removed the unused local i.)
for c3 in (select column_name col
from user_cons_columns@&prex.lnk
where constraint_name=c.constraint_name
order by position
)
loop
dbms_output.put_line('. '||c3.col);
end loop;
end loop;
end;
/
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT DIFFERENCES IN CHECK CONSTRAINT DEFS
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
set serveroutput on
-- For every check constraint common to both schemas, compare the two
-- SEARCH_CONDITION texts and print both versions when they differ.
declare
cursor c1 is select constraint_name,constraint_type,table_name
from &prex.comcons where constraint_type='C';
-- NOTE(review): cons, tab1 and tab2 are declared but never used.
cons varchar2(50);
tab1 varchar2(50);
tab2 varchar2(50);
-- SEARCH_CONDITION is a LONG; fetching it into VARCHAR2(32000) works for
-- conditions up to 32k characters (longer ones would raise ORA-06502).
search1 varchar2(32000);
search2 varchar2(32000);
begin
dbms_output.enable(100000);
for c in c1 loop
-- single-row lookups are safe: duplicate names were deleted from comcons
select search_condition into search1 from user_constraints
where constraint_name=c.constraint_name;
select search_condition into search2 from user_constraints@&prex.lnk
where constraint_name=c.constraint_name;
-- NULL comparisons yield UNKNOWN, so a NULL on either side is silently
-- treated as "no difference".
if search1 != search2 then
dbms_output.put_line('Check constraint '||c.constraint_name||
' defined differently!');
dbms_output.put_line('. Local definition:');
dbms_output.put_line('. '||search1);
dbms_output.put_line('. Remote definition:');
dbms_output.put_line('. '||search2);
end if;
end loop;
end;
/
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT TRIGGERS MISSING IN REMOTE SCHEMA
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- triggers that exist locally but not remotely
select trigger_name from user_triggers minus
select trigger_name from user_triggers@&prex.lnk;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT TRIGGERS MISSING IN THIS SCHEMA
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- BUG FIX: this query was a verbatim copy of the previous one
-- (local MINUS remote), so triggers missing from the local schema were
-- never reported.  The operands must be reversed: remote MINUS local.
select trigger_name from user_triggers@&prex.lnk minus
select trigger_name from user_triggers;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT TRIGGER DEFINITION DIFFERENCES ON COMMON TRIGGERS
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
set serveroutput on
-- Compare each local trigger with its same-named remote counterpart,
-- attribute by attribute (table, event, type, referencing clause, WHEN
-- clause, status, description, body) and report every mismatch.
-- The schema owner prefix is stripped from DESCRIPTION on both sides so
-- that only real definition differences are reported.
declare
cursor c1 is select
TRIGGER_NAME,TRIGGER_TYPE,TRIGGERING_EVENT,
TABLE_NAME,REFERENCING_NAMES,rtrim(WHEN_CLAUSE,' '),STATUS,
rtrim(replace(description,'"&thisuser".',null),' ') DESCRIPTION,
TRIGGER_BODY from user_Triggers;
nam1 varchar2(30);
type1 varchar2(16);
event1 varchar2(26);
table1 varchar2(30);
ref1 varchar2(87);
when1 varchar2(2000);
status1 varchar2(8);
desc1 varchar2(2000);
body1 varchar2(32000);
type2 varchar2(16);
event2 varchar2(26);
table2 varchar2(30);
ref2 varchar2(87);
when2 varchar2(2000);
status2 varchar2(8);
desc2 varchar2(2000);
body2 varchar2(32000);
-- pr_head is set once the "T R I G G E R" banner has been printed, so it
-- is printed at most once per trigger.
pr_head boolean;
begin
dbms_output.enable(100000);
-- NOTE(review): c1 is opened but never closed; harmless for a one-shot
-- script, but untidy.
open c1;
loop
fetch c1 into nam1,type1,event1,table1,ref1,when1,status1,desc1,body1;
exit when c1%notfound;
begin
select
TRIGGER_TYPE,TRIGGERING_EVENT,
TABLE_NAME,REFERENCING_NAMES,rtrim(WHEN_CLAUSE,' '),STATUS,
rtrim(replace(description,upper('"&remoteschema".'),null),' ') DESCRIPTION,
TRIGGER_BODY
into type2,event2,table2,ref2,when2,status2,desc2,body2
from user_Triggers@&prex.lnk
where trigger_name=nam1;
pr_head := FALSE;
if table1 != table2 then
dbms_output.put_line('T R I G G E R : '||nam1);
dbms_output.put_line('-------------------------------------------------'||
'-----------------------');
pr_head := TRUE;
dbms_output.put_line(' ');
dbms_output.put_line('DEFINED ON DIFFERENT TABLES!');
dbms_output.put_line('. This table_name : '||table1);
dbms_output.put_line('. Remote table_name: '||table2);
end if;
if event1 != event2 then
if not pr_head then
dbms_output.put_line('T R I G G E R : '||nam1);
dbms_output.put_line('-------------------------------------------------'||
'-----------------------');
pr_head := TRUE;
end if;
dbms_output.put_line(' ');
dbms_output.put_line('DEFINED FOR DIFFERENT EVENTS!');
dbms_output.put_line('. This event: '||event1);
dbms_output.put_line('. Remote event: '||event2);
end if;
if type1 != type2 then
if not pr_head then
dbms_output.put_line('T R I G G E R : '||nam1);
dbms_output.put_line('-------------------------------------------------'||
'-----------------------');
pr_head := TRUE;
end if;
dbms_output.put_line(' ');
dbms_output.put_line('DIFFERENT TYPES!');
dbms_output.put_line('. This type: '||type1);
dbms_output.put_line('. Remote: '||type2);
end if;
if ref1 != ref2 then
if not pr_head then
dbms_output.put_line('T R I G G E R : '||nam1);
dbms_output.put_line('-------------------------------------------------'||
'-----------------------');
pr_head := TRUE;
end if;
dbms_output.put_line(' ');
dbms_output.put_line('DIFFERENT REFERENCES!');
dbms_output.put_line('. This ref: '||ref1);
dbms_output.put_line('. Remote: '||ref2);
end if;
if when1 != when2 then
dbms_output.put_line(' ');
if not pr_head then
dbms_output.put_line('T R I G G E R : '||nam1);
dbms_output.put_line('-------------------------------------------------'||
'-----------------------');
pr_head := TRUE;
end if;
dbms_output.put_line('DIFFERENT WHEN CLAUSES!');
dbms_output.put_line('. Local when_clause:');
dbms_output.put_line(when1);
dbms_output.put_line('. Remote when_clause: ');
dbms_output.put_line(when2);
end if;
-- NOTE(review): from here on differences are printed WITHOUT the
-- trigger-name banner when no earlier difference set pr_head, so a
-- status/description/body-only mismatch appears without naming the
-- trigger.  Left as-is to keep the spooled output format unchanged.
if status1 != status2 then
dbms_output.put_line(' ');
dbms_output.put_line('DIFFERENT STATUS!');
dbms_output.put_line('. Local status: '||status1);
dbms_output.put_line('. Remote status: '||status2);
end if;
if replace(desc1,chr(10),'') != replace(desc2,chr(10),'') then
dbms_output.put_line(' ');
dbms_output.put_line('DIFFERENT DESCRIPTIONS!');
dbms_output.put_line('Local definition: ');
dbms_output.put_line(desc1);
dbms_output.put_line('Remote definition: ');
dbms_output.put_line(desc2);
end if;
if body1 != body2 then
dbms_output.put_line(' ');
dbms_output.put_line('THE PL/SQL BLOCKS ARE DIFFERENT! ');
dbms_output.put_line(' ');
end if;
-- A trigger missing remotely was already reported by the MINUS queries
-- above, so NO_DATA_FOUND is deliberately ignored here.
exception when NO_DATA_FOUND then null;
when others then raise_application_error(-20010,SQLERRM);
end;
end loop;
end;
/
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT MISSING PROCEDURES/PACKAGES/FUNCTIONS IN REMOTE SCHEMA
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- user_source has one row per source LINE; DISTINCT collapses that to one
-- row per stored unit (MINUS would de-duplicate as well, so the DISTINCT
-- is redundant but harmless).
select distinct name,type from user_source minus
select distinct name,type from user_source@&prex.lnk;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT MISSING PROCEDURES/PACKAGES/FUNCTIONS IN LOCAL SCHEMA
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select distinct name,type from user_source@&prex.lnk minus
select distinct name,type from user_source;
-- Work table comcod: program units that exist (by name and type) in both
-- schemas; used below to diff their source text.
create table &prex.comcod as
select distinct name,type from user_source intersect
select distinct name,type from user_source@&prex.lnk;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT PROCEDURES/PACKAGES/FUNCTIONS WITH DIFFERENT DEFINITIONS
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Set operators have equal precedence and evaluate left to right, so this
-- is (local UNION remote) MINUS (local INTERSECT remote): the symmetric
-- difference of the source lines of the common units.  Any unit that has
-- at least one line differing between the two schemas is listed once.
select distinct q.name Object_name,q.type Object_type from
(select a.name,a.type,a.line,a.text
from user_source a, &prex.comcod b
where a.name=b.name union
select a.name,a.type,a.line,a.text
from user_source@&prex.lnk a, &prex.comcod b
where a.name=b.name
minus
(select a.name,a.type,a.line,a.text
from user_source a, &prex.comcod b
where a.name=b.name
intersect
select a.name,a.type,a.line,a.text
from user_source@&prex.lnk a, &prex.comcod b
where a.name=b.name )) q;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT VIEWS MISSING IN THIS SCHEMA:
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Work table common_views: views present in both schemas; reused by the
-- definition diff further down.
create table &prex.common_views as
select view_name from user_views@&prex.lnk
intersect
select view_name from user_views;
-- remote MINUS common = views that exist remotely but not locally
select view_name from user_views@&prex.lnk
minus
select view_name from &prex.common_views;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT VIEWS MISSING IN REMOTE SCHEMA:
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
select view_name from user_views
minus
select view_name from user_views@&prex.lnk;
PROMPT
PROMPT
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PROMPT VIEWS WITH DIFFERENCES IN THE DEFINITION
PROMPT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- For every view common to both schemas, compare the two definitions with
-- all spaces removed, and print the differing 240-byte chunks.
-- TEXT is a LONG; fetching into VARCHAR2(32000) works for definitions up
-- to 32k characters (longer ones would raise ORA-06502).
declare
def1 varchar2(32000);
def2 varchar2(32000);
len1 number;
len2 number;
i number;
cursor c1 is select view_name from &prex.common_views;
begin
dbms_output.enable(100000);
for c in c1 loop
select text,text_length into def1,len1
from user_Views where view_name=c.view_name;
select text,text_length into def2,len2
from user_Views@&prex.lnk where view_name=c.view_name;
-- compare ignoring space differences (a redundant early "i := 1;" of
-- the original version was removed; i is initialised below)
def1:=replace(def1,' ','');
def2:=replace(def2,' ','');
if def1 != def2 or length(def1) != length(def2) then
dbms_output.put_line(lpad('-',35+length(c.view_name),'-'));
dbms_output.put_line('| '||c.view_name ||
' |');
dbms_output.put_line(lpad('-',35+length(c.view_name),'-'));
dbms_output.put_line('Local text_length: ' || to_char(len1));
-- BUG FIX: label read 'Remote text_length): ' (stray parenthesis)
dbms_output.put_line('Remote text_length: ' || to_char(len2));
dbms_output.put_line(' ');
i := 1;
while i <= length(def1) loop
-- NOTE(review): when the remote text is the shorter one, chunks of
-- def1 past the end of def2 compare against NULL (UNKNOWN) and are
-- not reported; only the length header reveals such a difference.
if substr(def1,i,240) != substr(def2,i,240) then
dbms_output.put_line('Difference at offset ' || to_char(i)
);
dbms_output.put_line(' local: ' || substr(def1,i,240));
dbms_output.put_line(' remote: ' || substr(def2,i,240));
end if;
i := i + 240;
end loop;
end if;
if length(def2) > length(def1) then
dbms_output.put_line('Remote longer than Local. Next 255 bytes: ');
-- BUG FIX: the original started at offset length(def1), which
-- re-printed the last common character; the remote surplus starts
-- at length(def1)+1.
dbms_output.put_line(substr(def2,length(def1)+1,255));
end if;
end loop;
end;
/
-- Clean up: drop the database link and all work tables created above.
drop database link &prex.lnk;
drop table &prex.comcod;
drop table &prex.diff_cols1;
drop table &prex.diff_cols2;
drop table &prex.common_tables;
drop table &prex.common_views;
drop table &prex.ind;
drop table &prex.ind1;
drop table &prex.ind2;
drop table &prex.comcons;
spool off
-- Restore session settings and release every substitution variable used
-- by the script (some are defined in the part of the script above this
-- excerpt).
set verify on
set feedback on
undef prex
undef prefx
undef a
undef thisuser
undef b
undef REMOTESCHEMA
undef REMOTEPASSW
undef connstring
undef c
undef todaysdate

26
vdh/constraint_info.sql Normal file
View File

@@ -0,0 +1,26 @@
-- Show constraint and constraint-column details for one table.
-- Prompts for the owner (T_OWNER) and table name (T_NAME); values must be
-- given in dictionary case (normally upper case).
set verify off
set linesize 300
-- NOTE(review): LONG display is capped at 30 characters, so only the
-- first 30 characters of SEARCH_CONDITION are shown; raise "set long" if
-- the full check-condition text is wanted.
set long 30
column constraint_name format a30
column constraint_type format a2 heading CT
column column_name format a30
column position format 99 heading CP
column r_owner format a30
column r_constraint_name format a30
break on constraint_name skip 1 on constraint_type
select con.constraint_name, con.constraint_type, con.status, con.r_owner, con.r_constraint_name,
col.column_name, col.position, con.search_condition
from dba_constraints con,
dba_cons_columns col
where col.owner = con.owner
and col.constraint_name = con.constraint_name
and con.owner = '&T_OWNER'
and con.table_name = '&T_NAME'
order by con.constraint_name, col.position;
clear breaks

18
vdh/cpu_table.sh Normal file
View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Print one line per logical CPU showing the physical socket ("physical id")
# and core ("core id") it belongs to, parsed from /proc/cpuinfo (Linux only).
# Field values keep the leading space that follows the ":" separator.
gawk '
BEGIN {
FS = ":"
# BUG FIX: the headers used "%5-s" etc. -- the "-" (left-justify) flag
# must PRECEDE the field width in a printf conversion; "%5-s" is not a
# valid format.  Use "%-5s"/"%-6s"/"%-4s" for left-justified headers.
printf "%-5s %-6s %-4s\n", "OS ID", "Socket", "Core"
printf "%-5s %-6s %-4s\n", "-----", "------", "----"
}
{
if ($1 ~ /processor/) {
PROC = $2
} else if ($1 ~ /physical id/) {
PHYID = $2
} else if ($1 ~ /core id/) {
CID = $2
# "core id" is the last of the three lines per CPU entry: emit the row
printf "%5s %6s %4s\n", PROC, PHYID, CID
}
}' /proc/cpuinfo

View File

@@ -0,0 +1,35 @@
-- Decode a raw dictionary value (e.g. LOW_VALUE / HIGH_VALUE from
-- DBA_TAB_COL_STATISTICS) into a readable string, using
-- DBMS_STATS.CONVERT_RAW_VALUE for the given declared datatype.
-- Returns 'UNKNOWN DATATYPE' for types it does not handle.
create or replace function enkitec.display_raw (rawval raw, type varchar2)
return varchar2
is
cn number;
cv varchar2(32);
cd date;
cnv nvarchar2(32);
cr rowid;
cc char(32);
begin
-- Greg Rahn's display_raw script
if (type = 'NUMBER') then
dbms_stats.convert_raw_value(rawval, cn);
return to_char(cn);
elsif (type = 'VARCHAR2') then
dbms_stats.convert_raw_value(rawval, cv);
return to_char(cv);
elsif (type = 'DATE') then
dbms_stats.convert_raw_value(rawval, cd);
return to_char(cd,'dd-mon-yyyy');
elsif (type = 'NVARCHAR2') then
dbms_stats.convert_raw_value(rawval, cnv);
return to_char(cnv);
elsif (type = 'ROWID') then
dbms_stats.convert_raw_value(rawval, cr);
-- BUG FIX: the ROWID branch converted into cr but returned cnv (the
-- NVARCHAR2 buffer, unset here) -- a copy-paste error.  Return cr.
return to_char(cr);
elsif (type = 'CHAR') then
dbms_stats.convert_raw_value(rawval, cc);
return to_char(cc);
else
return 'UNKNOWN DATATYPE';
end if;
end;
/

116
vdh/create_sql_profile.sql Normal file
View File

@@ -0,0 +1,116 @@
----------------------------------------------------------------------------------------
--
-- File name: create_sql_profile.sql
--
-- Purpose: Create SQL Profile based on Outline hints in V$SQL.OTHER_XML.
--
-- Author: Kerry Osborne
--
-- Usage: This script prompts for five values.
--
-- sql_id: the sql_id of the statement to attach the profile to (must be in the shared pool)
--
-- child_no: the child_no of the statement from v$sql
--
-- profile_name: the name of the profile to be generated
--
-- category: the name of the category for the profile
--
-- force_matching: a toggle to turn on or off the force_matching feature
--
-- Description:
--
-- Based on a script by Randolf Geist.
--
-- Mods: This is the 2nd version of this script which removes dependency on rg_sqlprof1.sql.
--
-- See kerryosborne.oracle-guy.com for additional information.
---------------------------------------------------------------------------------------
--
-- @rg_sqlprof1 '&&sql_id' &&child_no '&&category' '&force_matching'
set feedback off
set sqlblanklines on
-- 'X0X0X0X0' is a sentinel meaning "not supplied"; the PL/SQL block below
-- derives a default profile name from it.
accept sql_id -
prompt 'Enter value for sql_id: ' -
default 'X0X0X0X0'
accept child_no -
prompt 'Enter value for child_no (0): ' -
default '0'
accept profile_name -
prompt 'Enter value for profile_name (PROF_sqlid_planhash): ' -
default 'X0X0X0X0'
accept category -
prompt 'Enter value for category (DEFAULT): ' -
default 'DEFAULT'
accept force_matching -
prompt 'Enter value for force_matching (FALSE): ' -
default 'false'
declare
ar_profile_hints sys.sqlprof_attr;
cl_sql_text clob;
l_profile_name varchar2(30);
begin
-- Collect the optimizer outline hints embedded in the cursor's OTHER_XML.
select
extractvalue(value(d), '/hint') as outline_hints
bulk collect
into
ar_profile_hints
from
xmltable('/*/outline_data/hint'
passing (
select
xmltype(other_xml) as xmlval
from
v$sql_plan
where
sql_id = '&&sql_id'
and child_number = &&child_no
and other_xml is not null
)
) d;
-- Fetch the statement text and resolve the profile name: when the
-- sentinel default was kept, build PROF_<sqlid>_<planhash>.
select
sql_fulltext,
decode('&&profile_name','X0X0X0X0','PROF_&&sql_id'||'_'||plan_hash_value,'&&profile_name')
into
cl_sql_text, l_profile_name
from
v$sql
where
sql_id = '&&sql_id'
and child_number = &&child_no;
-- Import the hint set as a SQL Profile (requires a Tuning Pack license).
dbms_sqltune.import_sql_profile(
sql_text => cl_sql_text,
profile => ar_profile_hints,
category => '&&category',
name => l_profile_name,
force_match => &&force_matching
-- replace => true
);
dbms_output.put_line(' ');
dbms_output.put_line('SQL Profile '||l_profile_name||' created.');
dbms_output.put_line(' ');
exception
when NO_DATA_FOUND then
dbms_output.put_line(' ');
dbms_output.put_line('ERROR: sql_id: '||'&&sql_id'||' Child: '||'&&child_no'||' not found in v$sql.');
dbms_output.put_line(' ');
end;
/
undef sql_id
undef child_no
undef profile_name
undef category
undef force_matching
set sqlblanklines off
set feedback on

View File

@@ -0,0 +1,23 @@
-- Create SMALL/MEDIUM locally-managed tablespace pairs for the test and
-- tools schemas, with uniform extent sizes of 64K and 512K respectively.
-- NOTE(review): datafile sizes look deliberately chosen as a whole number
-- of uniform extents plus 64K of file overhead (71744K = 1120*64K + 64K,
-- 102464K = 200*512K + 64K) -- confirm before changing.
create tablespace test_small
datafile 'c:\oracle\oradata\vdev1\test\test_small01.dbf'
size 71744K
extent management local uniform size 64K
/
create tablespace test_medium
datafile 'c:\oracle\oradata\vdev1\test\test_medium01.dbf'
size 102464K
extent management local uniform size 512K
/
create tablespace tools_small
datafile 'c:\oracle\oradata\vdev1\tools\tools_small01.dbf'
size 71744K
extent management local uniform size 64K
/
create tablespace tools_medium
datafile 'c:\oracle\oradata\vdev1\tools\tools_medium01.dbf'
size 102464K
extent management local uniform size 512K
/

25
vdh/create_user.sql Normal file
View File

@@ -0,0 +1,25 @@
-- Interactively create a developer account: prompts for name, password,
-- default/temporary tablespace and a tablespace quota in MB, then grants
-- the basic object-creation privileges.
set verify off
accept l_user char prompt 'username: '
accept l_pass char prompt 'password: '
accept l_tablespace char prompt 'tablespace: '
accept l_quota number prompt 'quota (in MB) on tablespace: '
accept l_temp_tablespace char prompt 'temporary tablespace: '
-- The "." after l_quota terminates the substitution-variable name so the
-- literal "M" is appended (e.g. quota 100M).
create user &&l_user identified by &l_pass
default tablespace &&l_tablespace
temporary tablespace &&l_temp_tablespace
quota &&l_quota.M ON &&l_tablespace
/
grant create session to &&l_user;
grant create table to &&l_user;
grant create view to &&l_user;
grant create trigger to &&l_user;
grant create procedure to &&l_user;
grant create type to &&l_user;
grant create sequence to &&l_user;
grant create synonym to &&l_user;
grant query rewrite to &&l_user;
exit;

63
vdh/db_cache_advice.sql Normal file
View File

@@ -0,0 +1,63 @@
-- Buffer-cache advisory report (RAC-aware): for every instance, buffer
-- pool and block size, list the projected physical/cluster reads at each
-- simulated cache size, and the delta against the CURRENT cache size.
set linesize 250
set pages 50000
column block_size format a10 heading "Block Size"
column instance format a15 heading "Instance"
column size_for_estimate format 9G999G990D99 heading "Cache Size (MB)"
column estd_physical_read_factor format 990D999 heading "Factor"
column estd_physical_reads format 99G999G999G999G990 heading "Est Phys Reads"
column estd_phys_reads_delta format 999G999G999G990 heading "Est Phys Reads Del"
column estd_cluster_reads format 99G999G999G999G990 heading "Est Clust Reads"
column estd_cls_reads_delta format 999G999G999G990 heading "Est Clust Reads Del"
column estd_physical_read_time format 9G999G999G990 heading "Est Phys Read Time"
column estd_phys_read_time_delta format 9G999G999G990 heading "Est Phys Read Time Del"
column estd_pct_of_db_time_for_reads format 9990D99 heading "Est % read"
column estd_cluster_read_time format 9G999G999G990 heading "Est Clust Read Time"
column estd_cls_read_time_delta format 9G999G999G990 heading "Est Clust Read Time Del"
-- NOTE(review): "break on name" has no effect since no NAME column is in
-- the select list; the DEFAULT pool is flagged via the "(*)" decode.
break on name skip 2 on block_size on instance skip 1
-- current_size: the advisory row at read factor 1 (= the cache size as it
-- is now), used below to express every projection as a delta from today.
with current_size as
( select
inst_id,
block_size,
estd_physical_reads,
estd_physical_read_time,
estd_cluster_reads,
estd_cluster_read_time
from
gv$db_cache_advice
where
advice_status = 'ON'
and estd_physical_read_factor = 1
)
select
ca.block_size || decode(name, 'DEFAULT', ' (*)') block_size,
ca.inst_id || ' (' || to_char(inst.startup_time, 'DD/MM/YYYY') || ')' instance,
ca.size_for_estimate,
ca.estd_physical_read_factor,
ca.estd_pct_of_db_time_for_reads,
ca.estd_physical_reads,
(ca.estd_physical_reads - cs.estd_physical_reads) estd_phys_reads_delta,
ca.estd_physical_read_time,
( ca.estd_physical_read_time - cs.estd_physical_read_time) estd_phys_read_time_delta,
ca.estd_cluster_reads,
(ca.estd_cluster_reads - cs.estd_cluster_reads) estd_cls_reads_delta,
ca.estd_cluster_read_time,
( ca.estd_cluster_read_time - cs.estd_cluster_read_time) estd_cls_read_time_delta
from
gv$db_cache_advice ca,
current_size cs,
gv$instance inst
where
ca.block_size = cs.block_size
and ca.inst_id = cs.inst_id
and inst.inst_id = ca.inst_id
and ca.advice_status = 'ON'
order by
ca.block_size,
ca.inst_id,
ca.size_for_estimate
;
clear breaks

282
vdh/db_info.sql Normal file
View File

@@ -0,0 +1,282 @@
/* run this script as a dba user
on the db server, from the directory where this script is stored:
set ORACLE_SID=<DB NAME>
sqlplus "/ as sysdba"
@db_info.sql <logfile>
exit
this will generate a logfile in the current directory
*/
set pagesize 9999
set linesize 150
set serveroutput on
set trimspool on
set echo off
set feedback off
spool &1
--------------------------------------------- DB ----------------------------------------------------------------------
column db_unique_name format a15
column platform_name format a20
column host_name format a30
column name format a10
column log_mode format a15
column force_logging format a3 heading FL
column protection_mode format a25
column protection_level format a25
column flashback_on format a3 heading FO
select name, log_mode, force_logging, protection_level, protection_mode, database_role
from v$database;
--------------------------------------------- DB PARAMETERS ------------------------------------------------------------
set linesize 180
set pagesize 9999
COLUMN display_value FORMAT a15 word_wrapped
COLUMN value FORMAT a75 word_wrapped
COLUMN name FORMAT a35
column type format 999
select x.inst_id inst_id,ksppinm name,ksppity type,
ksppstvl value, ksppstdf isdefault
from x$ksppi x, x$ksppcv y
where (x.indx = y.indx)
and ( ksppstdf = 'FALSE'
and translate(ksppinm,'_','#') not like '##%'
or (ksppstdf = 'FALSE' and translate(ksppinm,'_','#') like '#%' and translate(ksppinm,'_','#') not like '##%')
)
order by x.inst_id, ksppinm;
column property_value format a40
select property_name, property_value
from database_properties
order by property_name;
--------------------------------------------- INSTALLED OPTIONS --------------------------------------------------------
column comp_name format a50
select comp_name, version, status
from dba_registry
order by comp_name;
--------------------------------------------- DB SIZES ------------------------------------------------------------------
column name format a25 heading "tablespace name"
column space_mb format 99g999g990D99 heading "curr df mbytes"
column maxspace_mb format 99g999g990D99 heading "max df mbytes"
column used format 99g999g990D99 heading "used mbytes"
column df_free format 99g999g990D99 heading "curr df free mbytes"
column maxdf_free format 99g999g990D99 heading "max df free mbytes"
column pct_free format 990D99 heading "% free"
column pct_maxfile_free format 990D99 heading "% maxfile free"
break on report
compute sum of space_mb on report
compute sum of maxspace_mb on report
compute sum of df_free on report
compute sum of maxdf_free on report
compute sum of used on report
prompt
prompt DB - Sizes
prompt __________
select df.tablespace_name name, df.space space_mb, df.maxspace maxspace_mb, (df.space - nvl(fs.freespace,0)) used,
nvl(fs.freespace,0) df_free, (nvl(fs.freespace,0) + df.maxspace - df.space) maxdf_free,
100 * (nvl(fs.freespace,0) / df.space) pct_free,
100 * ((nvl(fs.freespace,0) + df.maxspace - df.space) / df.maxspace) pct_maxfile_free
from ( select tablespace_name, sum(bytes)/1024/1024 space, sum(greatest(maxbytes,bytes))/1024/1024 maxspace
from dba_data_files
group by tablespace_name
) df,
( select tablespace_name, sum(bytes)/1024/1024 freespace
from dba_free_space
group by tablespace_name
) fs
where df.tablespace_name = fs.tablespace_name(+)
order by name;
clear breaks
--------------------------------------------- TABLESPACE INFO --------------------------------------------------------------
prompt
prompt tablespace info
prompt _______________
column max_mb format 9G999G990D99
column curr_mb format 9G999G990D99
column free_mb format 9G999G990D99
column pct_free format 900D99 heading "%FREE"
column NE format 999999D99
column SSM format a6
column AT format a8
column tablespace_name format a20
column EM format a10
column contents format a15
column block_size format 99999 heading bsize
select A.tablespace_name, block_size, A.contents, extent_management EM, allocation_type AT,
segment_space_management ssm, decode(allocation_type, 'UNIFORM',next_extent/1024,'') NE,
B.max_mb, B.curr_mb,
(B.max_mb - B.curr_mb) + nvl(c.free_mb,0) free_mb,
((100/B.max_mb)*(B.max_mb - B.curr_mb + nvl(c.free_mb,0))) pct_free
from dba_tablespaces A,
( select tablespace_name, sum(bytes)/1024/1024 curr_mb,
sum(greatest(bytes, maxbytes))/1024/1024 max_mb
from dba_data_files
group by tablespace_name
union all
select tablespace_name, sum(bytes)/1024/1024 curr_mb,
sum(greatest(bytes, maxbytes))/1024/1024 max_mb
from dba_temp_files
group by tablespace_name
) B,
( select tablespace_name, sum(bytes)/1024/1024 free_mb
from dba_free_space
group by tablespace_name
) C
where A.tablespace_name = B.tablespace_name
and A.tablespace_name = C.tablespace_name(+)
order by tablespace_name;
--------------------------------------------- DF DETAILS ------------------------------------------------------------------
column curr_mb format 9G999G990D99
column max_mb format 9G9999990D99
column incr_mb format 9G999G990D99
column file_name format a70
--column file_name format a60
column tablespace_name format a20
break on tablespace_name skip 1
set linesize 150
set pagesize 999
prompt
prompt datafiles info
prompt ______________
select A.tablespace_name, file_id, file_name, bytes/1024/1024 curr_mb, autoextensible,
maxbytes/1024/1024 max_mb, (increment_by * block_size)/1024/1024 incr_mb
from ( select tablespace_name, file_id, file_name, bytes, autoextensible, maxbytes,
increment_by
from dba_data_files
union all
select tablespace_name, file_id, file_name, bytes, autoextensible, maxbytes,
increment_by
from dba_temp_files
) A, dba_tablespaces B
where A.tablespace_name = B.tablespace_name
order by A.tablespace_name, file_name;
clear breaks;
--------------------------------------------- ONLINE REDO INFO ----------------------------------------------------------------
column member format a65
column type format a10
column status format a15
column arch format a4
break on type on thread# nodup skip 1 on type nodup on GROUP# nodup
prompt
prompt online redo info
prompt ________________
select type, A.thread#, A.group#, B.member, A.bytes/1024/1024 mb,A.status, arch
from ( select group#, thread#, bytes, status, archived arch
from v$log
union all
select group#, thread#, bytes, status, archived arch
from v$standby_log
) A, v$logfile B
where A.group# = B.group#
order by type, A.thread#, A.group#, B.member;
clear breaks
--------------------------------------------- REDO SIZES ------------------------------------------------------------------
column day_arch# format 999G999
column graph format a15
column dayname format a12
column day format a12
prompt
prompt redo sizes
prompt __________
column start_day format a22
column end_day format a22
column days_between format 99
column avg_archived_per_day format a13 heading avg_gen
select to_char(min(dag), 'DD/MM/YYYY HH24:MI:SS') start_day, to_char(max(dag) + 1 - 1/(24*60*60), 'DD/MM/YYYY HH24:MI:SS') end_day,
(max(dag) - min(dag) + 1) days_between,
to_char(avg(gen_archived_size),'9G999G999D99') avg_archived_per_day
from ( select trunc(completion_time) dag, sum(blocks * block_size)/1024/1024 gen_archived_size
from v$archived_log
where standby_dest = 'NO'
and months_between(trunc(sysdate), trunc(completion_time)) <= 1
and completion_time < trunc(sysdate)
group by trunc(completion_time)
);
/*
archived redo over the (max) last 10 days
*/
column day_arch_size format 99G999D99
column day_arch# format 999G999
column graph format a15
column dayname format a12
column day format a12
select to_char(day, 'DD/MM/YYYY') day, to_char(day,'DAY') dayname, day_arch_size, day_arch#, graph
from ( select trunc(completion_time) day, sum(blocks * block_size)/1024/1024 day_arch_size, count(*) day_arch#,
rpad('*',floor(count(*)/10),'*') graph
from v$archived_log
where standby_dest = 'NO'
and completion_time >= trunc(sysdate) - 10
group by trunc(completion_time)
order by day
);
/*
archived redo per hour over the (max) last 2 days
*/
column hour_arch_size format 99G999D99
column hour_arch# format 9G999
column graph format a15
column dayname format a12
column dayhour format a18
break on dayname skip 1
select to_char(dayhour,'DAY') dayname, to_char(dayhour, 'DD/MM/YYYY HH24:MI') dayhour, hour_arch_size, hour_arch#, graph
from ( select trunc(completion_time, 'HH') dayhour, sum(blocks * block_size)/1024/1024 hour_arch_size, count(*) hour_arch#,
rpad('*',floor(count(*)/4),'*') graph
from v$archived_log
where standby_dest = 'NO'
and completion_time >= trunc(sysdate) - 2
group by trunc(completion_time, 'HH')
order by dayhour
);
clear breaks;
spool off
exit

267
vdh/db_info2.sql Normal file
View File

@@ -0,0 +1,267 @@
/* run this script as a dba user
on the db server, from the directory where this script is stored:
set ORACLE_SID=<DB NAME>
sqlplus "/ as sysdba"
@db_info.sql <logfile>
exit
this will generate a logfile (db_info.txt) in the current directory
*/
set pagesize 9999
set linesize 150
set serveroutput on
set trimspool on
set echo off
set feedback off
spool &1.txt
--------------------------------------------- DB ----------------------------------------------------------------------
column platform_name format a40
column name format a15
column db_unique_name format a20
select name, db_unique_name, platform_name, flashback_on, log_mode
from v$database;
--------------------------------------------- DB PARAMETERS ------------------------------------------------------------
set linesize 180
set pagesize 9999
COLUMN display_value FORMAT a15 word_wrapped
COLUMN value FORMAT a75 word_wrapped
COLUMN name FORMAT a35
select x.inst_id inst_id,ksppinm name,ksppity type,
ksppstvl value, ksppstdf isdefault
from x$ksppi x, x$ksppcv y
where (x.indx = y.indx)
and ( ksppstdf = 'FALSE'
or translate(ksppinm,'_','#') like '##%'
-- or translate(ksppinm,'_','#') like '#%'
)
order by x.inst_id, ksppinm;
--------------------------------------------- INSTALLED OPTIONS --------------------------------------------------------
column comp_name format a50
select comp_name, version, status
from dba_registry
order by comp_name;
--------------------------------------------- DB SIZES ------------------------------------------------------------------
column name format a25 heading "tablespace name"
column space_mb format 99g999g990D99 heading "curr df mbytes"
column maxspace_mb format 99g999g990D99 heading "max df mbytes"
column used format 99g999g990D99 heading "used mbytes"
column df_free format 99g999g990D99 heading "curr df free mbytes"
column maxdf_free format 99g999g990D99 heading "max df free mbytes"
column pct_free format 990D99 heading "% free"
column pct_maxfile_free format 990D99 heading "% maxfile free"
break on report
compute sum of space_mb on report
compute sum of maxspace_mb on report
compute sum of df_free on report
compute sum of maxdf_free on report
compute sum of used on report
prompt
prompt DB - Sizes
prompt __________
select df.tablespace_name name, df.space space_mb, df.maxspace maxspace_mb, (df.space - nvl(fs.freespace,0)) used,
nvl(fs.freespace,0) df_free, (nvl(fs.freespace,0) + df.maxspace - df.space) maxdf_free,
100 * (nvl(fs.freespace,0) / df.space) pct_free,
100 * ((nvl(fs.freespace,0) + df.maxspace - df.space) / df.maxspace) pct_maxfile_free
from ( select tablespace_name, sum(bytes)/1024/1024 space, sum(greatest(maxbytes,bytes))/1024/1024 maxspace
from dba_data_files
group by tablespace_name
) df,
( select tablespace_name, sum(bytes)/1024/1024 freespace
from dba_free_space
group by tablespace_name
) fs
where df.tablespace_name = fs.tablespace_name(+)
order by name;
clear breaks
--------------------------------------------- TABLESPACE INFO --------------------------------------------------------------
prompt
prompt tablespace info
prompt _______________
column max_mb format 9G999G990D99
column curr_mb format 9G999G990D99
column free_mb format 9G999G990D99
column pct_free format 900D99 heading "%FREE"
column NE format 999999D99
column SSM format a6
column AT format a8
column tablespace_name format a20
column EM format a10
column contents format a15
column block_size format 99999 heading bsize
select A.tablespace_name, block_size, A.contents, extent_management EM, allocation_type AT,
segment_space_management ssm, decode(allocation_type, 'UNIFORM',next_extent/1024,'') NE,
B.max_mb, B.curr_mb,
(B.max_mb - B.curr_mb) + nvl(c.free_mb,0) free_mb,
((100/B.max_mb)*(B.max_mb - B.curr_mb + nvl(c.free_mb,0))) pct_free
from dba_tablespaces A,
( select tablespace_name, sum(bytes)/1024/1024 curr_mb,
sum(greatest(bytes, maxbytes))/1024/1024 max_mb
from dba_data_files
group by tablespace_name
union all
select tablespace_name, sum(bytes)/1024/1024 curr_mb,
sum(greatest(bytes, maxbytes))/1024/1024 max_mb
from dba_temp_files
group by tablespace_name
) B,
( select tablespace_name, sum(bytes)/1024/1024 free_mb
from dba_free_space
group by tablespace_name
) C
where A.tablespace_name = B.tablespace_name
and A.tablespace_name = C.tablespace_name(+)
order by tablespace_name;
--------------------------------------------- DF DETAILS ------------------------------------------------------------------
-- Per-datafile detail: current size, autoextend flag, autoextend maximum and
-- autoextend increment. increment_by is expressed in blocks, so it is
-- multiplied by the tablespace block size to get MB.
column curr_mb format 9G999G990D99
-- mask fixed: was 9G9999990D99 (group separator after the first digit only);
-- now consistent with the other MB columns
column max_mb format 9G999G990D99
column incr_mb format 9G999G990D99
column file_name format a70
column tablespace_name format a20
break on tablespace_name skip 1
set linesize 150
set pagesize 999
prompt
prompt datafiles info
prompt ______________
-- Data files and temp files are merged via UNION ALL, then joined to
-- dba_tablespaces for the block size.
select A.tablespace_name, file_id, file_name, bytes/1024/1024 curr_mb, autoextensible,
       maxbytes/1024/1024 max_mb, (increment_by * block_size)/1024/1024 incr_mb
from ( select tablespace_name, file_id, file_name, bytes, autoextensible, maxbytes,
              increment_by
       from dba_data_files
       union all
       select tablespace_name, file_id, file_name, bytes, autoextensible, maxbytes,
              increment_by
       from dba_temp_files
     ) A, dba_tablespaces B
where A.tablespace_name = B.tablespace_name
order by A.tablespace_name, file_name;
clear breaks
--------------------------------------------- ONLINE REDO INFO ----------------------------------------------------------------
-- Online and standby redo log layout: one row per log member, grouped per
-- thread and group.
column member format a55
column type format a10
column status format a20
column arch format a4
break on type on thread# nodup skip 1 on type nodup on GROUP# nodup
prompt
prompt online redo info
prompt ________________
-- A merges v$log (online groups) and v$standby_log (standby groups); B
-- (v$logfile) supplies the member paths and the TYPE column.
-- NOTE(review): the join is on group# only -- assumes online and standby
-- groups use distinct group numbers (the usual convention); verify on
-- databases where they overlap.
select type, A.thread#, A.group#, B.member, A.bytes/1024/1024 mb,A.status, arch
from ( select group#, thread#, bytes, status, archived arch
       from v$log
       union all
       select group#, thread#, bytes, status, archived arch
       from v$standby_log
     ) A, v$logfile B
where A.group# = B.group#
order by type, A.thread#, A.group#, B.member;
clear breaks
--------------------------------------------- REDO SIZES ------------------------------------------------------------------
column day_arch# format 999G999
column graph format a15
column dayname format a12
column day format a12
prompt
prompt redo sizes
prompt __________
column start_day format a22
column end_day format a22
column days_between format 99
column avg_archived_per_day format a13 heading avg_gen
-- Average MB of archived redo generated per day over (at most) the last
-- month. "dag" is the truncated completion day of each archived log.
-- standby_dest = 'NO' excludes copies shipped to standby destinations (avoids
-- double counting); completion_time < trunc(sysdate) excludes the current,
-- incomplete day. end_day is printed as the last second of the newest day.
-- NOTE(review): the average is taken over days that actually produced
-- archives; days with zero redo do not lower it.
select to_char(min(dag), 'DD/MM/YYYY HH24:MI:SS') start_day, to_char(max(dag) + 1 - 1/(24*60*60), 'DD/MM/YYYY HH24:MI:SS') end_day,
(max(dag) - min(dag) + 1) days_between,
to_char(avg(gen_archived_size),'9G999G999D99') avg_archived_per_day
from ( select trunc(completion_time) dag, sum(blocks * block_size)/1024/1024 gen_archived_size
from v$archived_log
where standby_dest = 'NO'
and months_between(trunc(sysdate), trunc(completion_time)) <= 1
and completion_time < trunc(sysdate)
group by trunc(completion_time)
);
/*
   archived redo over the (max) last 10 days
*/
column day_arch_size format 99G999D99
column day_arch# format 999G999
column graph format a15
column dayname format a12
column day format a12
-- Daily archived-redo volume (MB and log count) for the last 10 days, with a
-- crude bar graph (one '*' per 10 logs). standby_dest = 'NO' avoids double
-- counting copies shipped to standbys.
-- ORDER BY is on the outer query, on the inline view's DATE column: an ORDER
-- BY inside an inline view does not guarantee the order of the outer result,
-- and sorting on the formatted 'DD/MM/YYYY' string would order incorrectly.
select to_char(dt, 'DD/MM/YYYY') day, to_char(dt,'DAY') dayname, day_arch_size, day_arch#, graph
from ( select trunc(completion_time) dt, sum(blocks * block_size)/1024/1024 day_arch_size, count(*) day_arch#,
              rpad('*',floor(count(*)/10),'*') graph
       from v$archived_log
       where standby_dest = 'NO'
       and completion_time >= trunc(sysdate) - 10
       group by trunc(completion_time)
     )
order by dt;
/*
   archived redo per hour over the (max) last 2 days
*/
column hour_arch_size format 99G999D99
column hour_arch# format 9G999
column graph format a15
column dayname format a12
column dayhour format a18
break on dayname skip 1
-- Hourly archived-redo volume (MB and log count) for the last 2 days, with a
-- bar graph (one '*' per 4 logs); the BREAK inserts a blank line at each day
-- change.
-- ORDER BY is on the outer query, on the inline view's DATE column: an ORDER
-- BY inside an inline view does not guarantee the order of the outer result.
select to_char(hr,'DAY') dayname, to_char(hr, 'DD/MM/YYYY HH24:MI') dayhour, hour_arch_size, hour_arch#, graph
from ( select trunc(completion_time, 'HH') hr, sum(blocks * block_size)/1024/1024 hour_arch_size, count(*) hour_arch#,
              rpad('*',floor(count(*)/4),'*') graph
       from v$archived_log
       where standby_dest = 'NO'
       and completion_time >= trunc(sysdate) - 2
       group by trunc(completion_time, 'HH')
     )
order by hr;
clear breaks;
spool off

90
vdh/db_report.sql Normal file
View File

@@ -0,0 +1,90 @@
-- db_report.sql: driver script that spools a full database overview report by
-- calling the individual vdh scripts (sizes, redo, ASM, config, jobs, RMAN,
-- load history).
clear breaks
set pagesize 9999
set serveroutput on
set trimspool on
set echo off
set feedback 1
----------------------------------------- either specify a logfile name yourself or one will be generated for you
set verify off
set feedback off
-- Trick to make positional parameter &1 optional: the no-row select defines
-- substitution variable 1 (as empty) so that referencing &1 below does not
-- prompt; nvl() then supplies the default spool name
-- <db_unique_name>_<timestamp>_db_overview.log.
column dcol new_value spoolname noprint
column inputpar01 new_value 1 noprint
select 1 inputpar01 from dual where 1=2;
select
nvl('&1', db_unique_name || '_' || to_char(sysdate,'YYYYMMDDHH24MISS') || '_db_overview.log') dcol
from
v$database
;
undefine 1
spool &spoolname
prompt version and os info
prompt ...................
select * from v$version;
@list_db_patches.sql
-- Apply the numeric format BEFORE querying v$osstat and clear it afterwards.
-- (Previously the format was cleared again immediately before the query, so
-- it never took effect.)
column value format 999G999G999G999G999D99
select * from v$osstat order by stat_name;
column value clear
prompt
prompt tablespace and datafiles details
prompt ................................
@db_size2.sql
@tbs_info.sql
@df_details2.sql
@online_logfiles_info.sql
@fra_usage.sql
prompt ASM layout
prompt ..........
@asm_diskgroup_info.sql
@dg_attributes.sql
prompt DB Config
prompt .........
@list_parameters2.sql
@list_arch_dest.sql
@sga_report.sql
@db_cache_advice.sql
prompt DB Jobs
prompt .......
@get_job_overview.sql
@show_autotasks.sql
prompt RMAN backups
prompt ............
@rman_backup_overview2.sql
prompt Some load info
prompt ..............
@archived_redo_stats.sql
@undo_usage_24window.sql
@top_segments_size.sql
set linesize 300
@top_seg_history.sql
@tblspace_growth_per_day.sql
spool off;

30
vdh/db_size.sql Normal file
View File

@@ -0,0 +1,30 @@
-- db_size.sql: tablespace total/used/free report built from a plain join of
-- dba_free_space x dba_data_files. Joining every free extent to every data
-- file of the same tablespace multiplies rows, so the count(distinct ...)
-- divisors undo that multiplication before summing.
-- NOTE(review): because the join is an INNER join, tablespaces with no row in
-- dba_free_space (completely full ones, and temp tablespaces) are missing
-- from this report -- db_size2.sql is the outer-join variant that handles
-- them.
column dummy noprint
column pct_used format 999D9 heading "%|Used"
column name format a25 heading "Tablespace Name"
column bytes format 9G999G999G999G999 heading "Total Megs"
column used format 99G999G999G999 heading "Used"
column free format 999G999G999G999 heading "Free"
break on report
compute sum of bytes on report
compute sum of free on report
compute sum of used on report
select a.tablespace_name name, b.tablespace_name dummy,
sum(b.bytes)/count( distinct a.file_id||'.'||a.block_id ) /1024/1024 bytes,
sum(b.bytes)/count( distinct a.file_id||'.'||a.block_id )/1024/1024 - sum(a.bytes)/count( distinct b.file_id )/1024/1024 used,
sum(a.bytes)/count( distinct b.file_id ) /1024/1024 free,
100 * ( (sum(b.bytes)/count( distinct a.file_id||'.'||a.block_id )) - (sum(a.bytes)/count( distinct b.file_id ) )) / (sum(b.bytes)/count( distinct a.file_id||'.'||a.block_id )) pct_used
from sys.dba_free_space a, sys.dba_data_files b
where a.tablespace_name = b.tablespace_name
group by a.tablespace_name, b.tablespace_name;

51
vdh/db_size2.sql Normal file
View File

@@ -0,0 +1,51 @@
-- db_size2.sql: per-tablespace space report -- current and autoextend-max
-- datafile sizes, used/free MB and %free, with report totals. Optional
-- parameter &1 filters tablespace names (LIKE pattern); when omitted it
-- defaults to '%' via the no-row select / nvl trick below.
set pagesize 999
set linesize 200
set verify off
set feedback off
column inputpar01 new_value 1 noprint
select 1 inputpar01 from dual where 1=2;
set feedback 6
column name format a25 heading "tablespace name"
column space_mb format 9G999g999g990D99 heading "curr df mbytes"
column maxspace_mb format 9G999g999g990D99 heading "max df mbytes"
column used format 9G999g999g990D99 heading "used mbytes"
column df_free format 9G999g999g990D99 heading "curr df free mbytes"
column maxdf_free format 9G999g999g990D99 heading "max df free mbytes"
column pct_free format 990D99 heading "% free"
column pct_maxfile_free format 990D99 heading "% maxfile free"
break on report
compute sum of space_mb on report
compute sum of maxspace_mb on report
compute sum of df_free on report
compute sum of maxdf_free on report
compute sum of used on report
-- df merges permanent and temp file sizes; fs (dba_free_space) is
-- outer-joined and nvl'ed to 0 so full tablespaces still appear.
-- NOTE(review): dba_free_space presumably has no rows for temp tablespaces,
-- so those report 0 free here -- verify against v$temp_space_header if temp
-- usage matters.
select df.tablespace_name name, df.space space_mb, df.maxspace maxspace_mb, (df.space - nvl(fs.freespace,0)) used,
nvl(fs.freespace,0) df_free, (nvl(fs.freespace,0) + df.maxspace - df.space) maxdf_free,
100 * (nvl(fs.freespace,0) / df.space) pct_free,
100 * ((nvl(fs.freespace,0) + df.maxspace - df.space) / df.maxspace) pct_maxfile_free
from ( select tablespace_name, sum(bytes)/1024/1024 space, sum(greatest(maxbytes,bytes))/1024/1024 maxspace
from dba_data_files
group by tablespace_name
union all
select tablespace_name, sum(bytes)/1024/1024 space, sum(greatest(maxbytes,bytes))/1024/1024 maxspace
from dba_temp_files
group by tablespace_name
) df,
( select tablespace_name, sum(bytes)/1024/1024 freespace
from dba_free_space
group by tablespace_name
) fs
where df.tablespace_name = fs.tablespace_name(+)
and df.tablespace_name like nvl('&1', '%')
order by name;
clear breaks
clear computes
undef 1

6
vdh/dba_tables.sql Normal file
View File

@@ -0,0 +1,6 @@
-- dba_tables.sql: list tables matching the optional &owner / &table_name
-- LIKE filters. An empty answer to either prompt makes nvl() fall back to
-- the column itself, i.e. no filtering on that column.
set lines 150
select owner, table_name, status, last_analyzed, num_rows, blocks, degree
from dba_tables
where owner like nvl('&owner',owner)
and table_name like nvl('&table_name',table_name)
/

51
vdh/df_details.sql Normal file
View File

@@ -0,0 +1,51 @@
-- df_details.sql: datafile details (current MB, autoextend max MB, autoextend
-- increment MB) for ONE tablespace, prompted once via &&tablespacename and
-- undefined at the end so the next run prompts again.
set linesize 120
set pages 999
set verify off
column file_name format a70
column mb format 9G999G999D99
column incr_by_mb format 9G999D99
column max_mb format 9G999G999D99
compute sum of MB on report
compute sum of max_MB on report
break on report
-- "files" merges data and temp files of the tablespace; "blocksize" yields
-- the tablespace's single block_size row, so the cartesian join below is a
-- deliberate 1-row cross join used to convert increment_by (blocks) to MB.
with
files as
( select
file_id, file_name, bytes, maxbytes, increment_by
from
dba_data_files
where
tablespace_name = '&&tablespacename'
union all
select
file_id, file_name, bytes, maxbytes, increment_by
from
dba_temp_files
where
tablespace_name = '&&tablespacename'
),
blocksize as
( select
block_size
from
dba_tablespaces
where
tablespace_name = '&&tablespacename'
)
select
files.file_id, files.file_name,
(files.bytes/1024/1024) MB,
(files.maxbytes/1024/1024) max_MB,
((files.increment_by * blocksize.block_size )/1024/1024) incr_by_mb
from
files,
blocksize
order by
file_id
/
clear breaks
undefine tablespacename

24
vdh/df_details2.sql Normal file
View File

@@ -0,0 +1,24 @@
-- df_details2.sql: per-datafile detail for all tablespaces -- current size,
-- autoextend flag, autoextend maximum and increment (increment_by is in
-- blocks, multiplied by the tablespace block size to get MB).
column curr_mb format 9G999G990D99
-- mask fixed: was 9G9999990D99 (group separator after the first digit only);
-- now consistent with the other MB columns
column max_mb format 9G999G990D99
column incr_mb format 9G999G990D99
column file_name format a70
column tablespace_name format a20
break on tablespace_name skip 1
set linesize 150
set pagesize 999
-- Data files and temp files are merged via UNION ALL, then joined to
-- dba_tablespaces for the block size.
select A.tablespace_name, file_id, file_name, bytes/1024/1024 curr_mb, autoextensible,
       maxbytes/1024/1024 max_mb, (increment_by * block_size)/1024/1024 incr_mb
from ( select tablespace_name, file_id, file_name, bytes, autoextensible, maxbytes,
              increment_by
       from dba_data_files
       union all
       select tablespace_name, file_id, file_name, bytes, autoextensible, maxbytes,
              increment_by
       from dba_temp_files
     ) A, dba_tablespaces B
where A.tablespace_name = B.tablespace_name
order by A.tablespace_name, file_name;
clear breaks;

13
vdh/df_details_pre_9.sql Normal file
View File

@@ -0,0 +1,13 @@
-- df_details_pre_9.sql: pre-9i variant of df_details2 -- dba_data_files had
-- no block_size join target here, so the instance-wide db_block_size
-- parameter is read via a scalar subquery instead.
-- NOTE(review): this assumes every tablespace uses the default block size;
-- incr_mb is wrong for tablespaces created with a non-standard block size.
select A.tablespace_name, file_name, bytes/1024/1024 curr_mb, autoextensible,
maxbytes/1024/1024 max_mb, (increment_by * (select value from v$parameter where name='db_block_size'))/1024/1024 incr_mb
from ( select tablespace_name, file_name, bytes, autoextensible, maxbytes,
increment_by
from dba_data_files
union all
select tablespace_name, file_name, bytes, autoextensible, maxbytes,
increment_by
from dba_temp_files
) A, dba_tablespaces B
where A.tablespace_name = B.tablespace_name
order by A.tablespace_name, file_name
/

View File

@@ -0,0 +1,20 @@
-- Datafile high-water-mark report: for each datafile, its current size, the
-- byte position of its highest allocated extent (HWM_MB) and the space that
-- could be reclaimed by shrinking the file down to that HWM (SAVING_mb).
column db_block_size new_value _BLOCK_SIZE;
select to_number(value) db_block_size from v$parameter where name = 'db_block_size';
-- b finds the highest starting block_id per file; c re-joins to fetch that
-- extent's length so the HWM is (block_id + blocks - 1) * block size.
-- NOTE(review): files with no extents are dropped by the inner joins, and
-- tablespaces with a non-default block size are computed with the
-- instance-wide db_block_size -- confirm before acting on SAVING_mb.
select
a.tablespace_name,
a.file_id,
a.file_name,
a.bytes/1024/1024 file_mb,
((c.block_id+(c.blocks-1)) * &_BLOCK_SIZE) /1024/1024 HWM_MB,
(a.bytes - ((c.block_id+(c.blocks-1)) * &_BLOCK_SIZE))/1024/1024 SAVING_mb
from dba_data_files a,
(select file_id,max(block_id) maximum
from dba_extents
group by file_id) b,
dba_extents c
where a.file_id = b.file_id
and c.file_id = b.file_id
and c.block_id = b.maximum
order by saving_mb;

View File

@@ -0,0 +1,12 @@
-- Archived logs shipped to standby destinations (standby_dest = 'YES') whose
-- transfer completed within the last 24 hours, grouped per destination and
-- thread.
set linesize 120
set pagesize 999
break on dest_id skip 2 on thread# skip 1;
select dest_id, thread#, sequence#, next_change#, to_char(next_time, 'DD/MM/YYYY HH24:MI:SS') next_time,
to_char(completion_time,'DD/MM/YYYY HH24:MI:SS') completion_time, archived, applied, deleted, status, fal
from v$archived_log
where standby_dest='YES'
and completion_time >= trunc(sysdate) -1
order by dest_id, thread#, sequence#;
clear breaks

12
vdh/dg_archives.sql Normal file
View File

@@ -0,0 +1,12 @@
-- dg_archives.sql: archived logs from the last 24 hours.
-- First query: only the copies shipped to standby destinations, with their
-- APPLIED status.
select thread#, sequence#, to_char(first_time, 'DD/MM/YYYY HH24:MI') ft, archived, applied, fal
from v$archived_log
where first_time >= trunc(sysdate) -1
and standby_dest = 'YES'
order by thread#, sequence#;
-- Second query: all destinations (local and standby) for comparison, so gaps
-- between generated and shipped sequences are visible.
select thread#, sequence#, to_char(first_time, 'DD/MM/YYYY HH24:MI') ft, archived, applied, fal
from v$archived_log
where first_time >= trunc(sysdate) -1
order by thread#, sequence#;

30
vdh/dg_attributes.sql Normal file
View File

@@ -0,0 +1,30 @@
-- dg_attributes.sql: list all ASM diskgroup attributes (compatibility
-- settings, AU size, etc.) per diskgroup, excluding the noisy per-file-type
-- 'template%' entries.
set linesize 130
set pages 50000
column dg_number format 999999999 heading "dg number"
column dg_name format a30 heading "dg name"
column attr_name format a40 heading "attribute name"
column attr_value format a40 heading "attribute value"
column attr_incarnation format 99999 heading "incar"
break on dg_number skip 1 on dg_name
select
dg.group_number dg_number,
dg.name dg_name,
attr.name attr_name,
attr.value attr_value,
attr.attribute_incarnation attr_incarnation
from
v$asm_attribute attr,
v$asm_diskgroup dg
where
attr.group_number = dg.group_number
and attr.name not like 'template%'
order by
dg.group_number,
attr.name,
attr.attribute_incarnation
;
clear breaks

245
vdh/dg_lsby_diag.sql Normal file
View File

@@ -0,0 +1,245 @@
--------------------------- Script begins here ---------------------------
-- NAME: dg_lsby_diag.sql (Run on LOGICAL STANDBY)
-- ------------------------------------------------------------------------
-- Copyright 2002, Oracle Corporation
-- LAST UPDATED: 2/23/04
--
-- Usage: @dg_lsby_diag
-- ------------------------------------------------------------------------
-- PURPOSE:
-- This script is to be used to assist in collecting information to help
-- troubleshoot Data Guard issues involving a Logical Standby.
-- ------------------------------------------------------------------------
-- DISCLAIMER:
-- This script is provided for educational purposes only. It is NOT
-- supported by Oracle World Wide Technical Support.
-- The script has been tested and appears to work as intended.
-- You should always run new scripts on a test instance initially.
-- ------------------------------------------------------------------------
-- Script output is as follows:
set echo off
set feedback off
-- Build a spool file name of the form dg_lsby_diag_<db_name>_<MonDD_hhmi>.out
-- via NEW_VALUE substitution variables.
column timecol new_value timestamp
column spool_extension new_value suffix
select to_char(sysdate,'Mondd_hhmi') timecol,
'.out' spool_extension from sys.dual;
column output new_value dbname
select value || '_' output
from v$parameter where name = 'db_name';
spool dg_lsby_diag_&&dbname&&timestamp&&suffix
set linesize 79
set pagesize 180
set long 1000
set trim on
set trims on
alter session set nls_date_format = 'MM/DD HH24:MI:SS';
set feedback on
select to_char(sysdate) time from dual;
set echo on
-- The following select will give us the generic information about how
-- this standby is setup. The database_role should be logical standby as
-- that is what this script is intended to be run on.
column ROLE format a7 tru
column NAME format a8 wrap
select name,database_role,log_mode,protection_mode
from v$database;
-- ARCHIVER can be (STOPPED | STARTED | FAILED). FAILED means that the
-- archiver failed to archive a log last time, but will try again within 5
-- minutes. LOG_SWITCH_WAIT The ARCHIVE LOG/CLEAR LOG/CHECKPOINT event log
-- switching is waiting for. Note that if ALTER SYSTEM SWITCH LOGFILE is
-- hung, but there is room in the current online redo log, then value is
-- NULL
column host_name format a20 tru
column version format a9 tru
select instance_name,host_name,version,archiver,log_switch_wait
from v$instance;
-- The following query give us information about catpatch.
-- This way we can tell if the procedure doesn't match the image.
select version, modified, status from dba_registry
where comp_id = 'CATPROC';
-- Force logging and supplemental logging are not mandatory but are
-- recommended if you plan to switchover. During normal operations it is
-- acceptable for SWITCHOVER_STATUS to be SESSIONS ACTIVE or NOT ALLOWED.
column force_logging format a13 tru
column remote_archive format a14 tru
column dataguard_broker format a16 tru
select force_logging,remote_archive,supplemental_log_data_pk,
supplemental_log_data_ui,switchover_status,dataguard_broker
from v$database;
-- This query produces a list of all archive destinations. It shows if
-- they are enabled, what process is servicing that destination, if the
-- destination is local or remote, and if remote what the current mount ID
-- is.
column destination format a35 wrap
column process format a7
column archiver format a8
column ID format 99
column mid format 99
select dest_id "ID",destination,status,target,
schedule,process,mountid mid
from v$archive_dest order by dest_id;
-- This select will give further detail on the destinations as to what
-- options have been set. Register indicates whether or not the archived
-- redo log is registered in the remote destination control file.
set numwidth 8
column ID format 99
select dest_id "ID",archiver,transmit_mode,affirm,async_blocks async,
net_timeout net_time,delay_mins delay,reopen_secs reopen,
register,binding
from v$archive_dest order by dest_id;
-- Determine if any error conditions have been reached by querying the
-- v$dataguard_status view (view only available in 9.2.0 and above):
column message format a80
select message, timestamp
from v$dataguard_status
where severity in ('Error','Fatal')
order by timestamp;
-- Query v$managed_standby to see the status of processes involved in
-- the shipping redo on this system. Does not include processes needed to
-- apply redo.
select process,status,client_process,sequence#
from v$managed_standby;
-- Verify that log apply services on the standby are currently
-- running. If the query against V$LOGSTDBY returns no rows then logical
-- apply is not running.
column status format a50 wrap
column type format a11
set numwidth 15
SELECT TYPE, STATUS, HIGH_SCN
FROM V$LOGSTDBY;
-- The DBA_LOGSTDBY_PROGRESS view describes the progress of SQL apply
-- operations on the logical standby databases. The APPLIED_SCN indicates
-- that committed transactions at or below that SCN have been applied. The
-- NEWEST_SCN is the maximum SCN to which data could be applied if no more
-- logs were received. This is usually the MAX(NEXT_CHANGE#)-1 from
-- DBA_LOGSTDBY_LOG. When the value of NEWEST_SCN and APPLIED_SCN are the
-- equal then all available changes have been applied. If your
-- APPLIED_SCN is below NEWEST_SCN and is increasing then SQL apply is
-- currently processing changes.
set numwidth 15
select
(case
when newest_scn = applied_scn then 'Done'
when newest_scn <= applied_scn + 9 then 'Done?'
when newest_scn > (select max(next_change#) from dba_logstdby_log)
then 'Near done'
when (select count(*) from dba_logstdby_log
where (next_change#, thread#) not in
(select first_change#, thread# from dba_logstdby_log)) > 1
then 'Gap'
when newest_scn > applied_scn then 'Not Done'
else '---' end) "Fin?",
newest_scn, applied_scn, read_scn from dba_logstdby_progress;
select newest_time, applied_time, read_time from dba_logstdby_progress;
-- Determine if apply is lagging behind and by how much. Missing
-- sequence#'s in a range indicate that a gap exists.
set numwidth 15
column trd format 99
-- Per-archived-log apply status against the SQL-apply progress SCNs.
-- "END" is a reserved word in Oracle SQL, so the dict_end alias must be
-- quoted -- the unquoted alias raised ORA-00923 and aborted this query.
select thread# trd, sequence#,
first_change#, next_change#,
dict_begin beg, dict_end "END",
to_char(timestamp, 'hh:mi:ss') timestamp,
(case when l.next_change# < p.read_scn then 'YES'
when l.first_change# < p.applied_scn then 'CURRENT'
else 'NO' end) applied
from dba_logstdby_log l, dba_logstdby_progress p
order by thread#, first_change#;
-- Get a history on logical standby apply activity.
set numwidth 15
select to_char(event_time, 'MM/DD HH24:MI:SS') time,
commit_scn, current_scn, event, status
from dba_logstdby_events
order by event_time, commit_scn, current_scn;
-- Dump logical standby stats
column name format a40
column value format a20
select * from v$logstdby_stats;
-- Dump logical standby parameters
column name format a33 wrap
column value format a33 wrap
column type format 99
select name, value, type from system.logstdby$parameters
order by type, name;
-- Gather log miner session and dictionary information.
set numwidth 15
select * from system.logmnr_session$;
select * from system.logmnr_dictionary$;
select * from system.logmnr_dictstate$;
select * from v$logmnr_session;
-- Query the log miner dictionary for key tables necessary to process
-- changes for logical standby. Label security will move AUD$ from SYS to
-- SYSTEM. A synonym will remain in SYS but Logical Standby does not
-- support this.
set numwidth 5
column name format a9 wrap
column owner format a6 wrap
select o.logmnr_uid, o.obj#, o.objv#, u.name owner, o.name
from system.logmnr_obj$ o, system.logmnr_user$ u
where
o.logmnr_uid = u.logmnr_uid and
o.owner# = u.user# and
o.name in ('JOB$','JOBSEQ','SEQ$','AUD$',
'FGA_LOG$','IND$','COL$','LOGSTDBY$PARAMETER')
order by u.name;
-- Non-default init parameters.
column name format a30 tru
column value format a48 wra
select name, value
from v$parameter
where isdefault = 'FALSE';
spool off
--------------------------- Script ends here ---------------------------

163
vdh/dg_phy_stby_diag.sql Normal file
View File

@@ -0,0 +1,163 @@
-- NAME: new_dg_psby_diag.sql
--
-- Copyright 2002, Oracle Corporation
--
-- LAST UPDATED: 02-Sep-2015
--
-- Usage: @new_dg_psby_diag
--
-- (Run from sqlplus on PHYSICAL STANDBY, ACTIVE STANDBY as SYS)
--
-- PURPOSE:
--
-- This script is to be used to assist in the collection of information to help
-- troubleshoot Data Guard issues involving a Physical or Active Standby.
--
-- DISCLAIMER:
--
-- This script is provided for educational purposes only. It is NOT
-- supported by Oracle World Wide Technical Support.
-- The script has been tested and appears to work as intended.
-- You should always run new scripts on a test instance initially.
--
--
-- Script output is as follows:
set echo off
set feedback off
-- Build an HTML spool file name new_dg_psby_diag_<db_unique_name>_<ts>.html
-- via NEW_VALUE substitution variables.
column timecol new_value timestamp
column spool_extension new_value suffix
SELECT TO_CHAR(sysdate,'yyyymmdd_hh24mi') timecol, '.html' spool_extension FROM dual;
column output new_value dbname
SELECT value || '_' output FROM v$parameter WHERE name = 'db_unique_name';
spool new_dg_psby_diag_&&dbname&&timestamp&&suffix
set linesize 2000
set pagesize 50000
set numformat 999999999999999
set trim on
set trims on
set markup html on
set markup html entmap off
set feedback on
ALTER SESSION SET nls_date_format = 'DD-MON-YYYY HH24:MI:SS';
SELECT TO_CHAR(sysdate) time FROM dual;
set echo on
-- The following select will give us the generic information about how this standby is setup.
-- The DATABASE_ROLE should be STANDBY as that is what this script is intended to be run on.
-- PLATFORM_ID should match the PLATFORM_ID of the primary or conform to the supported options in
-- Note: 413484.1 Data Guard Support for Heterogeneous Primary and Physical Standbys in Same Data Guard Configuration.
-- FLASHBACK can be YES (recommended) or NO.
-- If PROTECTION_LEVEL is different from PROTECTION_MODE then for some reason the mode listed in PROTECTION_MODE experienced a need to downgrade.
-- Once the error condition has been corrected the PROTECTION_LEVEL should match the PROTECTION_MODE after the next log switch.
SELECT database_role role, name, db_unique_name, platform_id, open_mode, log_mode, flashback_on, protection_mode, protection_level FROM v$database;
-- FORCE_LOGGING is not mandatory but is recommended.
-- REMOTE_ARCHIVE should be ENABLE.
-- SUPPLEMENTAL_LOG_DATA_PK and SUPPLEMENTAL_LOG_DATA_UI must be enabled if this standby is associated with a primary that has a logical standby.
-- During normal operations it is acceptable for SWITCHOVER_STATUS to be NOT ALLOWED.
-- DATAGUARD_BROKER can be ENABLED (recommended) or DISABLED.
column force_logging format a13 tru
column supplemental_log_data_pk format a24 tru
column supplemental_log_data_ui format a24 tru
SELECT force_logging, remote_archive, supplemental_log_data_pk, supplemental_log_data_ui, switchover_status, dataguard_broker FROM v$database;
-- Check how many threads are enabled and started for this database. If the number of instances below does not match then not all instances are up.
SELECT thread#, instance, status FROM v$thread;
-- The number of instances returned below is the number currently running. If it does not match the number returned in Threads above then not all instances are up.
-- VERSION should match the version from the primary database.
-- ARCHIVER can be (STOPPED | STARTED | FAILED). FAILED means that the archiver failed to archive a log last time, but will try again within 5 minutes.
-- LOG_SWITCH_WAIT the ARCHIVE LOG/CLEAR LOG/CHECKPOINT event log switching is waiting for.
-- Note that if ALTER SYSTEM SWITCH LOGFILE is hung, but there is room in the current online redo log, then the value is NULL.
column host_name format a32 wrap
SELECT thread#, instance_name, host_name, version, archiver, log_switch_wait FROM gv$instance ORDER BY thread#;
-- Check the number and size of online redo logs on each thread.
SELECT thread#, group#, sequence#, bytes, archived ,status FROM v$log ORDER BY thread#, group#;
-- The following query is run to see if standby redo logs have been created.
-- The standby redo logs should be the same size as the online redo logs.
-- There should be (( # of online logs per thread + 1) * # of threads) standby redo logs.
-- A value of 0 for the thread# means the log has never been allocated.
SELECT thread#, group#, sequence#, bytes, archived, status FROM v$standby_log order by thread#, group#;
-- This query produces a list of defined archive destinations.
-- It shows if they are enabled, what process is servicing that destination, if the destination is local or remote, and if remote what the current mount ID is.
-- For a physical standby we should have at least one remote destination that points the primary set.
column destination format a35 wrap
column process format a7
column ID format 99
column mid format 99
SELECT thread#, dest_id, destination, gvad.status, target, schedule, process, mountid mid FROM gv$archive_dest gvad, gv$instance gvi WHERE gvad.inst_id = gvi.inst_id AND destination is NOT NULL ORDER BY thread#, dest_id;
-- If the protection mode of the standby is set to anything higher than max performance then we need to make sure the remote destination that points to the primary is set with the correct options else we will have issues during switchover.
set numwidth 8
column archiver format a8
column ID format 99
column error format a55 wrap
SELECT thread#, dest_id, gvad.archiver, transmit_mode, affirm, async_blocks, net_timeout, delay_mins, reopen_secs reopen, register, binding FROM gv$archive_dest gvad, gv$instance gvi WHERE gvad.inst_id = gvi.inst_id AND destination is NOT NULL ORDER BY thread#, dest_id;
-- The following select will show any errors that occurred the last time an attempt to archive to the destination was attempted.
-- If ERROR is blank and status is VALID then the archive completed correctly.
SELECT thread#, dest_id, gvad.status, error FROM gv$archive_dest gvad, gv$instance gvi WHERE gvad.inst_id = gvi.inst_id AND destination is NOT NULL ORDER BY thread#, dest_id;
-- The query below will determine if any error conditions have been reached by querying the v$dataguard_status view (view only available in 9.2.0 and above).
column message format a80
SELECT timestamp, gvi.thread#, message FROM gv$dataguard_status gvds, gv$instance gvi WHERE gvds.inst_id = gvi.inst_id AND severity in ('Error','Fatal') ORDER BY timestamp, thread#;
-- Query gv$managed_standby to see the status of processes involved in the shipping redo on this system.
-- Does not include processes needed to apply redo.
SELECT thread#, process, pid, status, client_process, client_pid, sequence#, block#, active_agents, known_agents FROM gv$managed_standby ORDER BY thread#, process;
-- Verify the last sequence# received and the last sequence# applied to standby database.
SELECT al.thrd "Thread", almax "Last Seq Received", lhmax "Last Seq Applied" FROM (select thread# thrd, MAX(sequence#) almax FROM v$archived_log WHERE resetlogs_change#=(SELECT resetlogs_change# FROM v$database) GROUP BY thread#) al, (SELECT thread# thrd, MAX(sequence#) lhmax FROM v$log_history WHERE resetlogs_change#=(SELECT resetlogs_change# FROM v$database) GROUP BY thread#) lh WHERE al.thrd = lh.thrd;
-- Check the transport lag and apply lag from the V$DATAGUARD_STATS view. This is only relevant when LGWR log transport and real time apply are in use.
SELECT * FROM v$dataguard_stats WHERE name LIKE '%lag%';
-- Check how often and how far the apply lags.
SELECT name, time, unit, count, TO_DATE(last_time_updated, 'MM/DD/YYYY HH24:MI:SS') FROM v$standby_event_histogram ORDER BY unit DESC, time;
-- The V$ARCHIVE_GAP fixed view on a physical standby database only returns the next gap that is currently blocking redo apply from continuing.
-- After resolving the identified gap and starting redo apply, query the V$ARCHIVE_GAP fixed view again on the physical standby database to determine the next gap sequence, if there is one.
SELECT * FROM v$archive_gap;
-- Non-default init parameters.
-- For a RAC DB Thread# = * means the value is the same for all threads (SID=*)
-- Threads with different values are shown with their individual thread# and values.
column num noprint
-- NOTE(review): in the UNION branch below, the innermost subquery filters on
-- gvp.name (the outer alias) where the MINUS branch uses gvpa.name in the
-- same position -- looks like a copy/paste slip inherited from the source
-- script; confirm before changing, as it only affects which nls% rows are
-- excluded.
SELECT num, '*' "THREAD#", name, value FROM v$PARAMETER WHERE NUM IN (SELECT num FROM v$parameter WHERE (isdefault = 'FALSE' OR ismodified <> 'FALSE') AND name NOT LIKE 'nls%'
MINUS
SELECT num FROM gv$parameter gvp, gv$instance gvi WHERE num IN (SELECT DISTINCT gvpa.num FROM gv$parameter gvpa, gv$parameter gvpb WHERE gvpa.num = gvpb.num AND gvpa.value <> gvpb.value AND (gvpa.isdefault = 'FALSE' OR gvpa.ismodified <> 'FALSE') AND gvpa.name NOT LIKE 'nls%') AND gvi.inst_id = gvp.inst_id AND (gvp.isdefault = 'FALSE' OR gvp.ismodified <> 'FALSE') AND gvp.name NOT LIKE 'nls%')
UNION
SELECT num, TO_CHAR(thread#) "THREAD#", name, value FROM gv$parameter gvp, gv$instance gvi WHERE num IN (SELECT DISTINCT gvpa.num FROM gv$parameter gvpa, gv$parameter gvpb WHERE gvpa.num = gvpb.num AND gvpa.value <> gvpb.value AND (gvpa.isdefault = 'FALSE' OR gvpa.ismodified <> 'FALSE') AND gvp.name NOT LIKE 'nls%') AND gvi.inst_id = gvp.inst_id AND (gvp.isdefault = 'FALSE' OR gvp.ismodified <> 'FALSE') AND gvp.name NOT LIKE 'nls%' ORDER BY 1, 2;
spool off
set markup html off entmap on
set echo on

181
vdh/dg_prim_diag.sql Normal file
View File

@@ -0,0 +1,181 @@
-- NAME: new_dg_prim_diag.sql
--
-- Copyright 2002, Oracle Corporation
--
-- LAST UPDATED: 02-Sep-2015
--
-- USAGE: @new_dg_prim_diag
--
-- (Run from sqlplus on PRIMARY with a LOGICAL or PHYSICAL STANDBY as SYS)
--
-- PURPOSE:
--
-- This script is to be used to assist in the collection of information to
-- help troubleshoot Data Guard issues with a Primary Database
--
-- DISCLAIMER:
--
-- This script is provided for educational purposes only. It is NOT
-- supported by Oracle World Wide Technical Support.
-- The script has been tested and appears to work as intended.
-- You should always run new scripts on a test instance initially.
--
-- Script output is as follows:
set echo off
set feedback off
column timecol new_value timestamp
column spool_extension new_value suffix
SELECT TO_CHAR(sysdate,'yyyymmdd_hh24mi') timecol, '.html' spool_extension FROM dual;
column output new_value dbname
SELECT value || '_' output FROM v$parameter WHERE name = 'db_unique_name';
spool new_dg_prim_diag_&&dbname&&timestamp&&suffix
set linesize 2000
set pagesize 50000
set numformat 999999999999999
set trim on
set trims on
set markup html on
set markup html entmap off
set feedback on
ALTER SESSION SET nls_date_format = 'DD-MON-YYYY HH24:MI:SS';
SELECT TO_CHAR(sysdate) time FROM dual;
set echo on
-- In the following output the DATABASE_ROLE should be PRIMARY as that is what this script is intended to be run on.
-- PLATFORM_ID should match the PLATFORM_ID of the standby(s) or conform to the supported options in
-- Note: 413484.1 Data Guard Support for Heterogeneous Primary and Physical Standbys in Same Data Guard Configuration
-- Note: 1085687.1 Data Guard Support for Heterogeneous Primary and Logical Standbys in Same Data Guard Configuration
-- OPEN_MODE should be READ WRITE.
-- LOG_MODE should be ARCHIVELOG.
-- FLASHBACK can be YES (recommended) or NO.
-- If PROTECTION_LEVEL is different from PROTECTION_MODE then for some reason the mode listed in PROTECTION_MODE experienced a need to downgrade.
-- Once the error condition has been corrected the PROTECTION_LEVEL should match the PROTECTION_MODE after the next log switch;
SELECT database_role role, name, db_unique_name, platform_id, open_mode, log_mode, flashback_on, protection_mode, protection_level FROM v$database;
-- FORCE_LOGGING is not mandatory but is recommended.
-- REMOTE_ARCHIVE should be ENABLE.
-- SUPPLEMENTAL_LOG_DATA_PK and SUPPLEMENTAL_LOG_DATA_UI must be enabled if the standby associated with this primary is a logical standby.
-- During normal operations it is acceptable for SWITCHOVER_STATUS to be SESSIONS ACTIVE or TO STANDBY.
-- DG_BROKER can be ENABLED (recommended) or DISABLED.;
column force_logging format a13 tru
column remote_archive format a14 tru
column supplemental_log_data_pk format a24 tru
column supplemental_log_data_ui format a24 tru
column dataguard_broker format a16 tru
SELECT force_logging, remote_archive, supplemental_log_data_pk, supplemental_log_data_ui, switchover_status, dataguard_broker FROM v$database;
-- The following query gives us information about catpatch. From this we can tell if the catalog version doesn't match the image version it was started with.
column version format a10 tru
SELECT version, modified, status FROM dba_registry WHERE comp_id = 'CATPROC';
-- Check how many threads are enabled and started for this database. If the number of instances below does not match then not all instances are up.
SELECT thread#, instance, status FROM v$thread;
-- The number of instances returned below is the number currently running. If it does not match the number returned in Threads above then not all instances are up.
-- VERSION should match the version from CATPROC above.
-- ARCHIVER can be (STOPPED | STARTED | FAILED). FAILED means that the archiver failed to archive a log last time, but will try again within 5 minutes.
-- LOG_SWITCH_WAIT the ARCHIVE LOG/CLEAR LOG/CHECKPOINT event log switching is waiting for.
-- Note that if ALTER SYSTEM SWITCH LOGFILE is hung, but there is room in the current online redo log, then the value is NULL.
column host_name format a32 wrap
SELECT thread#, instance_name, host_name, version, archiver, log_switch_wait FROM gv$instance ORDER BY thread#;
-- Check how often logs are switching. Log switches should not regularly be occurring in < 20 mins.
-- Excessive log switching is a performance overhead. Whilst rapid log switching is not in itself a Data Guard issue it can affect Data guard.
-- It may also indicate a problem with log shipping. Use redo log size = 4GB or redo log size >= peak redo rate x 20 minutes.
SELECT fs.log_switches_under_20_mins, ss.log_switches_over_20_mins FROM (SELECT SUM(COUNT (ROUND((b.first_time - a.first_time) * 1440) )) "LOG_SWITCHES_UNDER_20_MINS" FROM v$archived_log a, v$archived_log b WHERE a.sequence# + 1 = b.sequence# AND a.dest_id = 1 AND a.thread# = b.thread# AND a.dest_id = b.dest_id AND a.dest_id = (SELECT MIN(dest_id) FROM gv$archive_dest WHERE target='PRIMARY' AND destination IS NOT NULL) AND ROUND((b.first_time - a.first_time) * 1440) < 20 GROUP BY ROUND((b.first_time - a.first_time) * 1440)) fs, (SELECT SUM(COUNT (ROUND((b.first_time - a.first_time) * 1440) )) "LOG_SWITCHES_OVER_20_MINS" FROM v$archived_log a, v$archived_log b WHERE a.sequence# + 1 = b.sequence# AND a.dest_id = 1 AND a.thread# = b.thread# AND a.dest_id = b.dest_id AND a.dest_id = (SELECT MIN(dest_id) FROM gv$archive_dest WHERE target='PRIMARY' AND destination IS NOT NULL) AND ROUND((b.first_time - a.first_time) * 1440) > 19 GROUP BY ROUND((b.first_time - a.first_time) * 1440)) ss;
column minutes format a12
SELECT (CASE WHEN bucket = 1 THEN '<= ' || TO_CHAR(bucket * 5) WHEN (bucket >1 AND bucket < 9) THEN TO_CHAR(bucket * 5 - 4) || ' TO ' || TO_CHAR(bucket * 5) WHEN bucket > 8 THEN '>= ' || TO_CHAR(bucket * 5 - 4) END) "MINUTES", switches "LOG_SWITCHES" FROM (SELECT bucket , COUNT(b.bucket) SWITCHES FROM (SELECT WIDTH_BUCKET(ROUND((b.first_time - a.first_time) * 1440), 0, 40, 8) bucket FROM v$archived_log a, v$archived_log b WHERE a.sequence# + 1 = b.sequence# AND a.dest_id = b.dest_id AND a.thread# = b.thread# AND a.dest_id = (SELECT MIN(dest_id) FROM gv$archive_dest WHERE target = 'PRIMARY' AND destination IS NOT NULL)) b GROUP BY bucket ORDER BY bucket);
-- Check the number and size of online redo logs on each thread.
SELECT thread#, group#, sequence#, bytes, archived ,status FROM v$log ORDER BY thread#, group#;
-- The following query is run to see if standby redo logs have been created in preparation for switchover.
-- The standby redo logs should be the same size as the online redo logs.<br>There should be (( # of online logs per thread + 1) * # of threads) standby redo logs.
-- A value of 0 for the thread# means the log has never been allocated.
SELECT thread#, group#, sequence#, bytes, archived, status FROM v$standby_log order by thread#, group#;
-- This query produces a list of defined archive destinations. It shows if they are enabled, what process is servicing that destination,
-- if the destination is local or remote.
column destination format a35 wrap
column process format a7
column ID format 99
column mid format 99
SELECT thread#, dest_id, destination, target, schedule, process FROM gv$archive_dest gvad, gv$instance gvi WHERE gvad.inst_id = gvi.inst_id AND destination is NOT NULL ORDER BY thread#, dest_id;
-- This select will give further detail on the destinations as to what options have been set.
-- Register indicates whether or not the archived redo log is registered in the remote destination control fileOptions.
set numwidth 8
column archiver format a8
column affirm format a6
column error format a55 wrap
column register format a8
SELECT thread#, dest_id, gvad.archiver, transmit_mode, affirm, async_blocks, net_timeout, max_failure, delay_mins, reopen_secs reopen, register, binding FROM gv$archive_dest gvad, gv$instance gvi WHERE gvad.inst_id = gvi.inst_id AND destination is NOT NULL ORDER BY thread#, dest_id;
-- The following select will show any errors that occurred the last time an attempt to archive to the destination was attempted.
-- If ERROR is blank and status is VALID then the archive completed correctly.
SELECT thread#, dest_id, gvad.status, error, fail_sequence FROM gv$archive_dest gvad, gv$instance gvi WHERE gvad.inst_id = gvi.inst_id AND destination is NOT NULL ORDER BY thread#, dest_id;
-- The query below will determine if any error conditions have been reached by querying the v$dataguard_status view (view only available in 9.2.0 and above).
column message format a80
SELECT gvi.thread#, timestamp, message FROM gv$dataguard_status gvds, gv$instance gvi WHERE gvds.inst_id = gvi.inst_id AND severity in ('Error','Fatal') ORDER BY timestamp, thread#;
-- Query v$managed_standby to see the status of processes involved in the shipping redo on this system.
-- Does not include processes needed to apply redo.
SELECT inst_id, thread#, process, pid, status, client_process, client_pid, sequence#, block#, active_agents, known_agents FROM gv$managed_standby ORDER BY thread#, pid;
-- The following query will determine the current sequence number and the last sequence archived.
-- If you are remotely archiving using the LGWR process then the archived sequence should be one higher than the current sequence.
-- If remotely archiving using the ARCH process then the archived sequence should be equal to the current sequence.
-- The applied sequence information is updated at log switch time.
-- The "Last Applied" value should be checked with the actual last log applied at the standby, only the standby is guaranteed to be correct.
SELECT cu.thread#, cu.dest_id, la.lastarchived "Last Archived", cu.currentsequence "Current Sequence", appl.lastapplied "Last Applied" FROM (select gvi.thread#, gvd.dest_id, MAX(gvd.log_sequence) currentsequence FROM gv$archive_dest gvd, gv$instance gvi WHERE gvd.status = 'VALID' AND gvi.inst_id = gvd.inst_id GROUP BY thread#, dest_id) cu, (SELECT thread#, dest_id, MAX(sequence#) lastarchived FROM gv$archived_log WHERE resetlogs_change# = (SELECT resetlogs_change# FROM v$database) AND archived = 'YES' GROUP BY thread#, dest_id) la, (SELECT thread#, dest_id, MAX(sequence#) lastapplied FROM gv$archived_log WHERE resetlogs_change# = (SELECT resetlogs_change# FROM v$database) AND applied = 'YES' GROUP BY thread#, dest_id) appl WHERE cu.thread# = la.thread# AND cu.thread# = appl.thread# AND cu.dest_id = la.dest_id AND cu.dest_id = appl.dest_id ORDER BY 1, 2;
-- The following select will attempt to gather as much information as possible from the standby.
-- Standby redo logs are not supported with Logical Standby until Version 10.1.
-- The ARCHIVED_SEQUENCE# from a logical standby is the sequence# created by the apply, not the sequence# sent from the primary.
set numwidth 8
column dest_id format 99
column Active format 99
SELECT dest_id, database_mode, recovery_mode, protection_mode, standby_logfile_count, standby_logfile_active FROM v$archive_dest_status WHERE destination IS NOT NULL;
-- Non-default init parameters. For a RAC DB Thread# = * means the value is the same for all threads (SID=*)
-- Threads with different values are shown with their individual thread# and values.
column num noprint
-- Non-default (or modified) init parameters, excluding NLS settings.
-- THREAD# = '*' means the value is identical on all instances; parameters
-- whose value differs per instance are listed per thread by the UNION branch.
SELECT num, '*' "THREAD#", name, value FROM v$PARAMETER WHERE NUM IN (SELECT num FROM v$parameter WHERE (isdefault = 'FALSE' OR ismodified <> 'FALSE') AND name NOT LIKE 'nls%'
MINUS
SELECT num FROM gv$parameter gvp, gv$instance gvi WHERE num IN (SELECT DISTINCT gvpa.num FROM gv$parameter gvpa, gv$parameter gvpb WHERE gvpa.num = gvpb.num AND gvpa.value <> gvpb.value AND (gvpa.isdefault = 'FALSE' OR gvpa.ismodified <> 'FALSE') AND gvpa.name NOT LIKE 'nls%') AND gvi.inst_id = gvp.inst_id AND (gvp.isdefault = 'FALSE' OR gvp.ismodified <> 'FALSE') AND gvp.name NOT LIKE 'nls%')
UNION
-- BUG FIX: the inner DISTINCT subquery below previously filtered on the OUTER
-- alias gvp.name instead of gvpa.name, silently correlating the subquery to
-- the outer row; the MINUS branch above shows the intended form (gvpa.name).
SELECT num, TO_CHAR(thread#) "THREAD#", name, value FROM gv$parameter gvp, gv$instance gvi WHERE num IN (SELECT DISTINCT gvpa.num FROM gv$parameter gvpa, gv$parameter gvpb WHERE gvpa.num = gvpb.num AND gvpa.value <> gvpb.value AND (gvpa.isdefault = 'FALSE' OR gvpa.ismodified <> 'FALSE') AND gvpa.name NOT LIKE 'nls%') AND gvi.inst_id = gvp.inst_id AND (gvp.isdefault = 'FALSE' OR gvp.ismodified <> 'FALSE') AND gvp.name NOT LIKE 'nls%' ORDER BY 1, 2;
spool off
set markup html off entmap on
set echo on

34
vdh/display_raw.sql Normal file
View File

@@ -0,0 +1,34 @@
-- display_raw: decode a raw-encoded statistics value (e.g. LOW_VALUE /
-- HIGH_VALUE from DBA_TAB_COL_STATISTICS) into readable text, using the
-- column's declared datatype to pick the right DBMS_STATS conversion.
--   rawval : the raw-encoded value
--   type   : datatype name ('NUMBER','VARCHAR2','DATE','NVARCHAR2','ROWID','CHAR')
-- Returns the decoded value as VARCHAR2, or 'UNKNOWN DATATYPE' otherwise.
create or replace function display_raw (rawval raw, type varchar2)
return varchar2
is
cn number;
cv varchar2(32);
cd date;
cnv nvarchar2(32);
cr rowid;
cc char(32);
begin
if (type = 'NUMBER') then
dbms_stats.convert_raw_value(rawval, cn);
return to_char(cn);
elsif (type = 'VARCHAR2') then
dbms_stats.convert_raw_value(rawval, cv);
return to_char(cv);
elsif (type = 'DATE') then
dbms_stats.convert_raw_value(rawval, cd);
return to_char(cd);
elsif (type = 'NVARCHAR2') then
dbms_stats.convert_raw_value(rawval, cnv);
return to_char(cnv);
elsif (type = 'ROWID') then
dbms_stats.convert_raw_value(rawval, cr);
-- BUG FIX: previously returned to_char(cnv) here (the NVARCHAR2 buffer,
-- still NULL on this path), so ROWID values always displayed as NULL.
return to_char(cr);
elsif (type = 'CHAR') then
dbms_stats.convert_raw_value(rawval, cc);
return to_char(cc);
else
return 'UNKNOWN DATATYPE';
end if;
end;
/

25
vdh/dnfs_mounts.sql Normal file
View File

@@ -0,0 +1,25 @@
set linesize 230
set pages 50000
column svrname format a15
column dirname format a30
column path format a20
column local format a20
-- Direct NFS (dNFS) mounts: one row per distinct server / exported
-- directory / channel path combination currently known to the instance.
SELECT DISTINCT
       svr.svrname,
       svr.dirname,
       chn.path,
       chn.local
FROM v$dnfs_servers svr
INNER JOIN v$dnfs_channels chn
    ON chn.svrname = svr.svrname
ORDER BY
       svr.svrname,
       svr.dirname,
       chn.path,
       chn.local;

View File

@@ -0,0 +1,22 @@
-- Turn off service-level tracing for the NAVIS_TRACING service, then walk the
-- sessions of that service which still show SQL_TRACE = 'ENABLED' and disable
-- tracing in each one individually.
-- NOTE(review): the per-session pass presumably handles sessions whose trace
-- was started before the service-level disable -- confirm against usage.
exec dbms_monitor.serv_mod_act_trace_disable(service_name => 'NAVIS_TRACING');
BEGIN
-- Sessions on the service that still have an active SQL trace.
for l_sessions in
( select sid, serial#
from v$session
where service_name ='NAVIS_TRACING'
and sql_trace = 'ENABLED'
)
loop
-- Stop tracing for this specific session (identified by sid + serial#).
dbms_monitor.session_trace_disable
( session_id => l_sessions.sid,
serial_num => l_sessions.serial#
);
end loop;
END;
/

22
vdh/dp_enable_tracing.sql Normal file
View File

@@ -0,0 +1,22 @@
-- Enable SQL trace (including wait events and bind values) for the
-- NAVIS_TRACING service, then also start tracing in every session already
-- connected through that service.
exec dbms_monitor.serv_mod_act_trace_enable(service_name => 'NAVIS_TRACING', WAITS => TRUE, BINDS => TRUE);
BEGIN
-- Sessions currently connected via the service.
for l_sessions in
( select sid, serial#
from v$session
where service_name ='NAVIS_TRACING'
)
loop
-- Start tracing for this specific session with the same options
-- (waits + binds) as the service-level enable above.
dbms_monitor.session_trace_enable
( session_id => l_sessions.sid,
serial_num => l_sessions.serial#,
waits => true,
binds => true
);
end loop;
END;
/

6
vdh/dp_list_sessions.sql Normal file
View File

@@ -0,0 +1,6 @@
-- List sessions connected through the NAVIS_TRACING service on all RAC
-- instances, with their current SQL and whether SQL trace is enabled.
column machine format a20
select inst_id, sid, serial#, machine, process, sql_id, sql_child_number, last_call_et, sql_trace
from gv$session
where service_name = 'NAVIS_TRACING'
-- Named columns instead of positional ORDER BY 1, 2 (same sort order).
order by inst_id, sid;

4
vdh/dplan.sql Normal file
View File

@@ -0,0 +1,4 @@
-- Display the cached execution plan of a cursor from the shared pool.
-- Prompts for &sql_id and &child_no; 'typical' is dbms_xplan's standard
-- detail level.
set lines 180
select * from table(dbms_xplan.display_cursor('&sql_id','&child_no','typical'))
/

2
vdh/dump_block.sql Normal file
View File

@@ -0,0 +1,2 @@
@find_trace
alter system dump datafile &fileno block &blockno;

View File

@@ -0,0 +1,13 @@
-- Format the execution plan of the statement currently running in a given
-- session, using dbms_xplan.display against the dynamic_plan_table view
-- (a PLAN_TABLE-shaped view over v$sql_plan).  The inner subquery maps the
-- session's current cursor to the 'ADDRESS_CHILDNUMBER' statement_id key.
-- NOTE(review): the target session is hard-coded (sid = 8, serial# = 822);
-- edit those literals for the session of interest before running.
select plan_table_output
from table( dbms_xplan.display
( 'dynamic_plan_table',
( select rawtohex(address) || '_' || child_number
from v$sql_plan, v$session
where address = sql_address
and hash_value = sql_hash_value
and sid = 8
and serial# = 822
and rownum = 1
), 'serial'
)
);

View File

@@ -0,0 +1,9 @@
-- PLAN_TABLE-shaped view over v$sql_plan so that dbms_xplan.display can
-- format plans of statements already cached in the shared pool.
-- statement_id encodes cursor address and child number as 'ADDRESS_CHILD',
-- which is the key callers pass to dbms_xplan.display.
create or replace view dynamic_plan_table
as
select rawtohex(address) || '_' || child_number as statement_id, sysdate timestamp,
operation, options, object_node, object_owner, object_name, 0 object_instance,
optimizer, search_columns, id, parent_id, position, cost, cardinality, bytes,
other_tag, partition_start, partition_stop, partition_id, other, distribution,
cpu_cost, io_cost, temp_space, access_predicates, filter_predicates
from v$sql_plan;

View File

@@ -0,0 +1,8 @@
/*
Enable event 10046 (extended SQL trace) in another session via
DBMS_SYSTEM.SET_EV.  Prompts for &_sid, &_serial and &_level.
_level: 12 --> binds and waits, 8 --> waits only, 4 --> binds only,
        1 --> basic trace, 0 --> off.
*/
exec dbms_system.set_ev(&_sid, &_serial, 10046, &_level,'');

466
vdh/enqwaits.sql Normal file
View File

@@ -0,0 +1,466 @@
/**********************************************************************
* File: enqwaits.sql
* Type: SQL*Plus script
* Author: Tim Gorman (Evergreen Database Technologies, Inc.)
* Date: 15jan01
*
* Description:
*
* This report is intended to provide a complete picture of the
* dependencies that ensue when session-based enqueues cause other
* sessions to "wait" (or be "blocked").
*
* Modifications:
* TGorman 15jan01 written
*********************************************************************/
REM
REM If your installation is running Oracle Apps R11+, then leave the following
REM two substitution variables containing blank values.
REM
REM If your database is not running Oracle Apps, then assign the string "/*"
REM (i.e. start comment) to the substitution variable START_ORACLE_APPS_CODE and
REM the string "*/" (i.e. end comment) to the substitution variables
REM END_ORACLE_APPS_CODE.
REM
define START_ORACLE_APPS_CODE=""
define END_ORACLE_APPS_CODE=""
spool enqwaits
set echo on feedback on timing on termout off
--
-- enqwaits: report, via DBMS_OUTPUT, every session currently waiting on an
-- enqueue together with the session(s) holding the conflicting lock.  For
-- each waiter and blocker it prints lock type/mode, transaction id and undo
-- usage, the TM (DML) locks held, and the SQL text currently executing.
--
--   b_GetRelatedSessions : when TRUE, also report other sessions sharing the
--                          same server process (same PADDR) as the waiter or
--                          blocker; when FALSE only the session itself.
--
-- Run with SERVEROUTPUT ON.  The &&START/END_ORACLE_APPS_CODE substitution
-- variables (defined in the calling script) optionally comment out the
-- Oracle Applications user-lookup sections.
--
create or replace procedure enqwaits
(b_GetRelatedSessions in boolean default FALSE)
is
--
-- Full SQL text of a cursor, reassembled in PIECE order.
cursor get_sqltext(in_address in raw)
is
select SQL_TEXT
from SYS.V_$SQLTEXT
where ADDRESS = in_address
order by PIECE;
--
-- Sessions waiting on an enqueue (REQUEST > 0, LMODE = 0), with the lock
-- type and the requested mode decoded into readable text.
cursor get_waiters
is
select SID,
TYPE,
DECODE(TYPE,
'BL','Buffer hash table',
'CF','Control File Transaction',
'CI','Cross Instance Call',
'CS','Control File Schema',
'CU','Bind Enqueue',
'DF','Data File',
'DL','Direct-loader index-creation',
'DM','Mount/startup db primary/secondary instance',
'DR','Distributed Recovery Process',
'DX','Distributed Transaction Entry',
'FI','SGA Open-File Information',
'FS','File Set',
'IN','Instance Number',
'IR','Instance Recovery Serialization',
'IS','Instance State',
'IV','Library Cache InValidation',
'JQ','Job Queue',
'KK','Redo Log "Kick"',
'LS','Log Start/Log Switch',
'MB','Master Buffer hash table',
'MM','Mount Definition',
'MR','Media Recovery',
'PF','Password File',
'PI','Parallel Slaves',
'PR','Process Startup',
'PS','Parallel Slaves Synchronization',
'RE','USE_ROW_ENQUEUE Enforcement',
'RT','Redo Thread',
'RW','Row Wait',
'SC','System Commit Number',
'SH','System Commit Number HWM',
'SM','SMON',
'SQ','Sequence Number',
'SR','Synchronized Replication',
'SS','Sort Segment',
'ST','Space Transaction',
'SV','Sequence Number Value',
'TA','Transaction Recovery',
'TD','DDL enqueue',
'TE','Extend-segment enqueue',
'TM','DML enqueue',
'TS','Temporary Segment',
'TT','Temporary Table',
'TX','Transaction',
'UL','User-defined Lock',
'UN','User Name',
'US','Undo Segment Serialization',
'WL','Being-written redo log instance',
'WS','Write-atomic-log-switch global enqueue',
'XA','Instance Attribute',
'XI','Instance Registration',
decode(substr(TYPE,1,1),
'L','Library Cache ('||substr(TYPE,2,1)||')',
'N','Library Cache Pin ('||substr(TYPE,2,1)||')',
'Q','Row Cache ('||substr(TYPE,2,1)||')',
'????')) LOCK_TYPE,
REQUEST,
DECODE(REQUEST,
0, '',
1, 'Null',
2, 'Sub-Share',
3, 'Sub-Exclusive',
4, 'Share',
5, 'Share/Sub-Excl',
6, 'Exclusive',
'<Unknown>') MODE_REQUESTED,
ID1,
ID2
from SYS.V_$LOCK
where REQUEST > 0
and LMODE = 0;
--
-- Sessions other than the waiter that HOLD a lock on the same resource
-- (same type/id1/id2, LMODE > 0, not themselves waiting).
cursor get_blockers(in_type in varchar2,
in_id1 in number,
in_id2 in number,
in_sid in number)
is
select SID,
TYPE,
DECODE(TYPE,
'BL','Buffer hash table',
'CF','Control File Transaction',
'CI','Cross Instance Call',
'CS','Control File Schema',
'CU','Bind Enqueue',
'DF','Data File',
'DL','Direct-loader index-creation',
'DM','Mount/startup db primary/secondary instance',
'DR','Distributed Recovery Process',
'DX','Distributed Transaction Entry',
'FI','SGA Open-File Information',
'FS','File Set',
'IN','Instance Number',
'IR','Instance Recovery Serialization',
'IS','Instance State',
'IV','Library Cache InValidation',
'JQ','Job Queue',
'KK','Redo Log "Kick"',
'LS','Log Start/Log Switch',
'MB','Master Buffer hash table',
'MM','Mount Definition',
'MR','Media Recovery',
'PF','Password File',
'PI','Parallel Slaves',
'PR','Process Startup',
'PS','Parallel Slaves Synchronization',
'RE','USE_ROW_ENQUEUE Enforcement',
'RT','Redo Thread',
'RW','Row Wait',
'SC','System Commit Number',
'SH','System Commit Number HWM',
'SM','SMON',
'SQ','Sequence Number',
'SR','Synchronized Replication',
'SS','Sort Segment',
'ST','Space Transaction',
'SV','Sequence Number Value',
'TA','Transaction Recovery',
'TD','DDL enqueue',
'TE','Extend-segment enqueue',
'TM','DML enqueue',
'TS','Temporary Segment',
'TT','Temporary Table',
'TX','Transaction',
'UL','User-defined Lock',
'UN','User Name',
'US','Undo Segment Serialization',
'WL','Being-written redo log instance',
'WS','Write-atomic-log-switch global enqueue',
'XA','Instance Attribute',
'XI','Instance Registration',
decode(substr(TYPE,1,1),
'L','Library Cache ('||substr(TYPE,2,1)||')',
'N','Library Cache Pin ('||substr(TYPE,2,1)||')',
'Q','Row Cache ('||substr(TYPE,2,1)||')',
'????')) LOCK_TYPE,
LMODE,
DECODE(LMODE,
0, '--Waiting--',
1, 'Null',
2, 'Sub-Share',
3, 'Sub-Exclusive',
4, 'Share',
5, 'Share/Sub-Excl',
6, 'Exclusive',
'<Unknown>') MODE_HELD,
ID1,
ID2
from SYS.V_$LOCK
where TYPE = in_type
and ID1 = in_id1
and ID2 = in_id2
and SID <> in_sid
and LMODE > 0
and REQUEST = 0;
--
-- Session/process/transaction detail for in_sid plus any sessions sharing
-- its server process (same PADDR); the target session sorts first.
-- NOTE(review): P and T are joined on S1 (the target session), so related
-- sessions display the target's OS PID and transaction, not their own --
-- confirm whether that is the intent for shared-server configurations.
cursor get_related_sessions(in_sid in number)
is
select S.SID,
S.SERIAL# SNBR,
S.LOGON_TIME,
S.USERNAME,
S.SQL_ADDRESS,
S.STATUS,
S.OSUSER,
P.SPID,
T.XIDUSN || '.' || T.XIDSLOT || '.' || T.XIDSQN TXN_ID,
T.STATUS TXN_STATUS,
T.START_TIME TXN_START_TIME,
T.USED_UBLK,
T.USED_UREC
from SYS.V_$SESSION S1,
SYS.V_$SESSION S,
SYS.V_$PROCESS P,
SYS.V_$TRANSACTION T
where S1.SID = in_sid
and S.PADDR = S1.PADDR
and P.ADDR = S1.PADDR
and T.ADDR (+) = S1.TADDR
order by decode(S.SID, in_sid, 0, S.SID);
--
-- TM (DML) locks held or requested by a session, resolved to owner/object,
-- with the mode labelled either HELD=... or REQUESTED=...
cursor get_dml_locks(in_sid in number)
is
select o.OWNER,
o.OBJECT_TYPE type,
o.OBJECT_NAME name,
decode(l.LMODE,
0, 'REQUESTED=' ||
DECODE(l.REQUEST,
0, '--Waiting--',
1, 'Null',
2, 'Sub-Share',
3, 'Sub-Exclusive',
4, 'Share',
5, 'Share/Sub-Excl',
6, 'Exclusive',
'<Unknown>'),
'HELD=' ||
DECODE(l.LMODE,
0, '--Waiting--',
1, 'Null',
2, 'Sub-Share',
3, 'Sub-Exclusive',
4, 'Share',
5, 'Share/Sub-Excl',
6, 'Exclusive',
'<Unknown>')) lmode
from sys.V_$LOCK l,
sys.DBA_OBJECTS o
where l.sid = in_sid
and l.type = 'TM'
and o.object_id = l.id1;
--
v_waiter_username varchar2(30);
v_blocker_username varchar2(30);
-- v_errcontext tracks the current step so the WHEN OTHERS handler can report
-- where a failure occurred.
v_errcontext varchar2(80);
v_errmsg varchar2(300);
--
begin
--
-- Outer loop: one pass per waiting session.
v_errcontext := 'open/fetch get_waiters';
for w in get_waiters loop
--
dbms_output.put_line('.');
v_errcontext := 'open/fetch get_related_sessions (waiters)';
for rw in get_related_sessions(w.sid) loop
--
-- First fetched row is the waiter itself (ORDER BY puts it first).
if w.sid = rw.sid then
--
v_waiter_username := rw.username;
--
&&START_ORACLE_APPS_CODE
-- Oracle Applications only: map OS process / OS user to the Apps
-- user name via FND_LOGINS / FND_USER.
v_errcontext := 'query waiters OraApps user info';
begin
select u.user_name
into v_waiter_username
from apps.fnd_logins l,
apps.fnd_user u
where l.spid = rw.spid
and l.login_name = rw.osuser
and l.end_time is null
and l.start_time =
(select max(ll.start_time)
from apps.fnd_logins ll
where ll.spid = l.spid
and ll.end_time is null)
and u.user_id = l.user_id;
exception
when no_data_found then
v_waiter_username := '';
when too_many_rows then
null;
end;
&&END_ORACLE_APPS_CODE
--
v_errcontext := 'PUT_LINE waiters session/lock info';
dbms_output.put_line(substr('Waiter: SID=' || rw.sid ||
' (' || rw.status || '), Logged on at ' ||
to_char(rw.logon_time,'DD-MON HH24:MI'),1,78));
dbms_output.put_line('....... REQUESTED LOCK|MODE=' ||
w.type || ' (' || w.lock_type ||
') | ' || w.mode_requested ||
' (' || w.id1 || ',' || w.id2 || ')');
dbms_output.put_line('....... AppsUser=' || v_waiter_username);
dbms_output.put_line('....... OS PID=' || rw.spid);
--
else
--
-- Subsequent rows are sessions sharing the waiter's process;
-- skip them unless the caller asked for related sessions.
if b_GetRelatedSessions = FALSE then
--
exit; -- ...exit from "get_related_sessions" cursor loop
--
end if;
--
v_errcontext := 'PUT_LINE related waiters session info';
dbms_output.put_line(substr('... Related waiting SID=' ||
rw.sid || ' (' || rw.status ||
'), Logged on at ' ||
to_char(rw.logon_time,'DD-MON HH24:MI'),1,78));
--
end if;
--
dbms_output.put_line('.... TXN ID=' || rw.txn_id ||
' (' || rw.txn_status || ') started=' ||
rw.txn_start_time || ' undo=' || rw.used_ublk || 'b/' ||
rw.used_urec || 'r');
--
-- DML (TM) locks held/requested by this waiter.
v_errcontext := 'open/fetch get_dml_locks (waiters)';
for d in get_dml_locks(rw.sid) loop
--
dbms_output.put_line(substr('....... DML Lock: ' ||
d.owner || '.' || d.name || ' (' || d.type || ') - LOCK ' ||
d.lmode,1,78));
--
v_errcontext := 'fetch/close get_dml_locks (waiters)';
--
end loop; /* end of "get_dml_locks (waiters)" cursor loop */
--
dbms_output.put_line('.... SQL Statement currently executing:');
v_errcontext := 'open/fetch waiters get_sqltext';
for t in get_sqltext(rw.sql_address) loop
--
dbms_output.put_line('....... ' || t.sql_text);
--
v_errcontext := 'fetch/close waiters get_sqltext';
--
end loop; /* end of "get_sqltext" cursor loop */
--
v_errcontext := 'fetch/close get_related_sessions (waiters)';
--
end loop; /* end of "get_related_sessions (waiters)" cursor loop */
--
-- For each waiter, report every session holding the conflicting lock.
v_errcontext := 'open/fetch get_blockers';
for b in get_blockers(w.type, w.id1, w.id2, w.sid) loop
--
v_errcontext := 'open/fetch get_related_sessions (blockers)';
for rb in get_related_sessions(b.sid) loop
--
if b.sid = rb.sid then
--
v_blocker_username := rb.username;
--
&&START_ORACLE_APPS_CODE
-- Oracle Applications only: same FND_LOGINS/FND_USER lookup
-- as above, for the blocker.
v_errcontext := 'query blockers OraApps user info';
begin
select u.user_name
into v_blocker_username
from apps.fnd_logins l,
apps.fnd_user u
where l.spid = rb.spid
and l.login_name = rb.osuser
and l.end_time is null
and l.start_time =
(select max(ll.start_time)
from apps.fnd_logins ll
where ll.spid = l.spid
and ll.end_time is null)
and u.user_id = l.user_id;
exception
when no_data_found then
v_blocker_username := '';
when too_many_rows then
null;
end;
&&END_ORACLE_APPS_CODE
--
v_errcontext := 'PUT_LINE blockers session/lock info';
dbms_output.put_line(substr('==>BLOCKER: SID=' || rb.sid ||
',' || rb.snbr ||
' (' || rb.status || '), Logged on at ' ||
to_char(rb.logon_time,'DD-MON HH24:MI'),1,78));
dbms_output.put_line('........... HELD LOCK|MODE=' ||
b.type || ' (' || b.lock_type ||
') | ' || b.mode_held);
dbms_output.put_line('........... AppsUser=' ||
v_blocker_username);
dbms_output.put_line('........... OS PID=' || rb.spid);
--
else
--
if b_GetRelatedSessions = FALSE then
--
exit; -- ...exit from "get_related_sessions" cursor loop
--
end if;
--
v_errcontext := 'PUT_LINE related blockers session info';
dbms_output.put_line(substr('...... Related BLOCKER: SID='
|| rb.sid || ' (' || rb.status ||
'), Logged on at ' ||
to_char(rb.logon_time,'DD-MON HH24:MI'),1,78));
--
end if;
--
dbms_output.put_line('....... TXN ID=' || rb.txn_id ||
' (' || rb.txn_status || ') started=' ||
rb.txn_start_time || ' undo=' || rb.used_ublk || 'b/' ||
rb.used_urec || 'r');
--
v_errcontext := 'open/fetch get_dml_locks (blockers)';
for d in get_dml_locks(rb.sid) loop
--
dbms_output.put_line(substr('........... DML Lock: ' ||
d.owner || '.' || d.name || ' (' || d.type || ') - LOCK ' ||
d.lmode,1,78));
--
v_errcontext := 'fetch/close get_dml_locks (blockers)';
--
end loop; /* end of "get_dml_locks (blockers)" cursor loop */
--
dbms_output.put_line('....... SQL currently executing (not necessarily the blocking SQL):');
v_errcontext := 'open/fetch get_sqltext (blockers)';
for t in get_sqltext(rb.sql_address) loop
--
dbms_output.put_line('........... ' || t.sql_text);
--
v_errcontext := 'fetch/close get_sqltext (blockers)';
--
end loop; /* end of "get_sqltext (blockers)" cursor loop */
--
v_errcontext := 'fetch/close get_related_sessions (blockers)';
--
end loop; /* end of "get_related_sessions (blockers)" cursor loop */
--
v_errcontext := 'fetch/close get_blockers';
--
end loop; /* end of "get_blockers" cursor loop */
--
v_errcontext := 'fetch/close get_waiters';
--
end loop; /* end of "get_waiters" cursor loop */
--
exception
-- Re-raise any failure with the step name recorded in v_errcontext so the
-- caller can see which cursor/output stage failed.
when others then
v_errmsg := substr(sqlerrm, 1, 300);
raise_application_error(-20001, v_errcontext||': '||v_errmsg);
end enqwaits;
/
set termout on
show errors
spool off

64
vdh/esfc_hit_ratio.sql Normal file
View File

@@ -0,0 +1,64 @@
----------------------------------------------------------------------------------------
--
-- File name: esfc_hit_ratio.sql
--
-- Purpose: Calculates a pseudo hit ratio for cell flash cache on Exadata storage.
--
-- Author: Kerry Osborne
--
-- Description:
--
-- The script uses the total number of cell single block physical read" events
-- plus the total number of "cell multiblock physical read" events as an
-- approximation of the total number of reads and compares that to total number
-- of cell flash cache hits.
--
--
-- Note: This script does not produce accurate results. The calculated
-- hit ratio will be overstated. A better approach is to evaluate
-- average access times for read operations such as the
-- "cell single block physical read" event. Cache hits should be
-- under 1ms while missed will be on the order of a few ms.
--
-- In fact, the results may be wildly overstated in cases where
-- objects are aggressively cached in ESFC due to the storage
-- parameter CELL_FLASH_CACHE being set to KEEP, as this causes
-- Smart Scans to use the flash cache as well.
--
-- See kerryosborne.oracle-guy.com for additional information.
---------------------------------------------------------------------------------------
set pages 999
set lines 140
column c1 heading 'Event|Name' format a30 trunc
column c2 heading 'Total|Waits' format 99,999,999
column c3 heading 'Seconds|Waiting' format 9,999,999
column c5 heading 'Average|Wait|(ms)' format 9999.9
column c6 heading 'Flash Cache Hits' for 999,999,999,999
col hit_ratio heading 'Hit Ratio' for 999.999
-- The inner query deliberately cross-joins v$system_event (restricted to the
-- two cell read events) with the single v$sysstat row 'cell flash cache read
-- hits': SUM() totals the wait rows while AVG() recovers the repeated stat
-- value. hit_ratio = flash cache hits / total cell reads, guarded against
-- division by zero via DECODE.
select
'cell single + multiblock reads' c1,
c2, c3, c5, c6,
c6/decode(nvl(c2,0),0,1,c2) hit_ratio
from (
select
sum(total_waits) c2,
avg(value) c6,
sum(time_waited / 100) c3,
avg((average_wait /100)*1000) c5
from
sys.v_$system_event, v$sysstat ss
where
event in (
'cell single block physical read',
'cell multiblock physical read')
and
name like 'cell flash cache read hits'
and
event not like '%Idle%')
order by
c3
;
ttitle off

6
vdh/esfc_keep_tables.sql Normal file
View File

@@ -0,0 +1,6 @@
-- List tables whose CELL_FLASH_CACHE storage attribute matches the filter.
-- Prompts for &owner, &table_name and &cell_flash_cache; an empty reply to
-- the first two matches everything (NVL falls back to the column itself),
-- while an empty &cell_flash_cache defaults to 'KEEP'.
select owner, table_name, status, last_analyzed, num_rows, blocks, degree, cell_flash_cache
from dba_tables
where owner like nvl('&owner',owner)
and table_name like nvl('&table_name',table_name)
and cell_flash_cache like nvl('&cell_flash_cache','KEEP')
/

View File

@@ -0,0 +1,53 @@
-- Uses optimizer statistics to estimate the compression ratio of OLTP / HCC
-- compressed table partitions.
-- The uncompressed size is estimated as avg_row_len * num_rows; the current
-- (compressed) size is the CBO block count multiplied by the tablespace
-- block size, rather than the size from dba_segments.  Taking both figures
-- from the same statistics snapshot avoids skew when the statistics are
-- stale (both estimates are then equally out of date).
column compress_for format a20 heading "compression method"
column max_compr_size_mb format 9G999G999D99 heading "max|compressed size (MB)"
column min_compr_size_mb format 9G999G999D99 heading "min|compressed size (MB)"
column avg_compr_size_mb format 9G999G999D99 heading "avg|compressed size (MB)"
column max_uncompr_size_mb format 999G999G999D99 heading "max|uncompressed size (MB)"
column min_uncompr_size_mb format 999G999G999D99 heading "min|uncompressed size (MB)"
column avg_uncompr_size_mb format 999G999G999D99 heading "avg|uncompressed size (MB)"
column max_pct_compression format 990D00 heading "max|% compression"
column min_pct_compression format 990D00 heading "min|% compression"
column avg_pct_compression format 990D00 heading "avg|% compression"
column nbr_samples format 9G999G999 heading "# samples"
select
prt.compress_for,
-- estimated uncompressed sizes (MB): avg_row_len * num_rows
max(prt.avg_row_len * prt.num_rows)/1024/1024 max_uncompr_size_mb,
min(prt.avg_row_len * prt.num_rows)/1024/1024 min_uncompr_size_mb,
avg(prt.avg_row_len * prt.num_rows)/1024/1024 avg_uncompr_size_mb,
-- current compressed sizes (MB): CBO block count * block size
max(prt.blocks * tbs.block_size)/1024/1024 max_compr_size_mb,
min(prt.blocks * tbs.block_size)/1024/1024 min_compr_size_mb,
avg(prt.blocks * tbs.block_size)/1024/1024 avg_compr_size_mb,
-- % compression = 100 - (compressed / uncompressed) * 100
max(100 - (100 / (prt.avg_row_len * prt.num_rows)) * (prt.blocks * tbs.block_size)) max_pct_compression,
min(100 - (100 / (prt.avg_row_len * prt.num_rows)) * (prt.blocks * tbs.block_size)) min_pct_compression,
avg(100 - (100 / (prt.avg_row_len * prt.num_rows)) * (prt.blocks * tbs.block_size)) avg_pct_compression,
count(*) nbr_samples
from
-- ANSI join (was a comma join): block size comes from the tablespace
dba_tab_partitions prt
inner join dba_tablespaces tbs
on prt.tablespace_name = tbs.tablespace_name
where
-- only compressed partitions
prt.compress_for in ('OLTP', 'QUERY LOW', 'QUERY HIGH', 'ARCHIVE LOW', 'ARCHIVE HIGH')
-- no subpartitioned tables
and prt.subpartition_count = 0
-- only partitions that are analyzed
and prt.last_analyzed is not null
-- filter out empty partitions
and prt.avg_row_len > 0
and prt.num_rows > 0
and prt.blocks > 0
-- filter out too small tables (less than 100MB worth of blocks), as they skew the result
and prt.blocks >= (104857600 / tbs.block_size)
-- filter out tables whose compressed size is not smaller than the uncompressed
-- estimate (perhaps compression enabled after loading and not yet moved?)
and (100 / (prt.avg_row_len * prt.num_rows)) * (prt.blocks * tbs.block_size) < 100
group by
prt.compress_for
order by
prt.compress_for
/

23
vdh/expiring_accounts.sql Normal file
View File

@@ -0,0 +1,23 @@
-- Report accounts that have already expired (or are in their grace period)
-- plus open accounts with a pending expiry date, ordered by user name.
COLUMN username FORMAT a30
COLUMN profile FORMAT a20
COLUMN account_status FORMAT a15
COLUMN creation_date FORMAT a20
COLUMN expiry_date FORMAT a11
SELECT
    username,
    profile,
    account_status,
    TO_CHAR(created, 'DD/MM/YYYY HH24:MI:SS') creation_date,
    TO_CHAR(expiry_date, 'DD/MM/YYYY') expiry_date
FROM
    dba_users
WHERE
    account_status IN ('EXPIRED', 'EXPIRED(GRACE)')
    OR (account_status = 'OPEN' AND expiry_date IS NOT NULL)
ORDER BY
    username
;

View File

@@ -0,0 +1,43 @@
-- List non-default accounts that have an expiry date set, with their
-- profile, status, lock/expiry dates and tablespace assignments, so that
-- expiring application accounts can be spotted before they lock out.
-- Fix: the NOT IN exclusion list contained duplicate literals
-- ('SYS', 'SYSTEM', 'WMSYS', 'OUTLN', 'PERFSTAT' each appeared twice);
-- duplicates removed, result set unchanged.
set linesize 200
column username format a30
column created format a10
column lock_date format a10
column expiry_date format a10
column profile format a20
column account_status format a25
column default_tablespace format a20
column temporary_tablespace format a20
column initial_rsrc_consumer_group format a30
select
username,
to_char(created, 'DD/MM/YYYY') created,
profile,
account_status,
to_char(lock_date,'DD/MM/YYYY') lock_date,
to_char(expiry_date,'DD/MM/YYYY') expiry_date,
default_tablespace,
temporary_tablespace,
initial_rsrc_consumer_group
from
dba_users
where
expiry_date is not null
and username not in
-- NOTE(review): several entries below are roles, not users; they can never
-- match a DBA_USERS row but are kept in case the list is reused elsewhere.
( 'SYS', 'SYSTEM', 'TSMSYS', 'WMSYS','SYSMAN', 'OUTLN', 'DBSNMP', 'PERFSTAT', 'UPTIME',
'ANALYZETHIS', 'AQ_ADMINISTRATOR_ROLE', 'AQ_USER_ROLE', 'DBA', 'DELETE_CATALOG_ROLE',
'EXECUTE_CATALOG_ROLE', 'EXP_FULL_DATABASE', 'GATHER_SYSTEM_STATISTICS', 'HS_ADMIN_ROLE',
'IMP_FULL_DATABASE', 'LOGSTDBY_ADMINISTRATOR', 'OEM_MONITOR', 'PANDORA',
'PUBLIC', 'SELECT_CATALOG_ROLE', 'WM_ADMIN_ROLE', 'TIVOLI_ROLE',
'CONNECT', 'JAVADEBUGPRIV', 'RECOVERY_CATALOG_OWNER', 'RESOURCE', 'TIVOLI', 'JAVASYSPRIV',
'GLOBAL_AQ_USER_ROLE', 'JAVAUSERPRIV', 'JAVAIDPRIV', 'EJBCLIENT', 'JAVA_ADMIN', 'JAVA_DEPLOY',
'MGMT_USER','MGMT_VIEW','OEM_ADVISOR','ORACLE_OCM','SCHEDULER_ADMIN','DIP','APPQOSSYS', 'ANONYMOUS',
'CTXSYS','EXFSYS','MDSYS','ORDDATA','ORDPLUGINS','ORDSYS','SI_INFORMTN_SCHEMA','XDB','DATAPUMP_EXP_FULL_DATABASE',
'DATAPUMP_IMP_FULL_DATABASE','ADM_PARALLEL_EXECUTE_TASK','CTXAPP','DBFS_ROLE','HS_ADMIN_EXECUTE_ROLE','HS_ADMIN_SELECT_ROLE',
'ORDADMIN','XDBADMIN','XDB_SET_INVOKER','XDB_WEBSERVICES','XDB_WEBSERVICES_OVER_HTTP','XDB_WEBSERVICES_WITH_PUBLIC',
'AUTHENTICATEDUSER','JMXSERVER','OLAPSYS'
)
order by
username
;

62
vdh/extmap.sql Normal file
View File

@@ -0,0 +1,62 @@
/**********************************************************************
* File: extmap.sql
* Type: SQL*Plus script
* Author: Tim Gorman (Evergreen Database Technologies, Inc.)
* Date: 26-Aug-99
*
* Description:
* Simple report against the DBA_EXTENTS view for Oracle8
* databases. This report is intended to be run periodically
* (i.e. daily or several times daily), each time overwriting
* itself.
*
* The report's main purpose is to provide a mapping of objects
* and their extents by the datafiles in the database, so that
* in the event of the need for an "object point-in-time"
* recovery, only the necessary datafiles need to be restored
* and recovered in the CLONE database.
*
* This report is one of those you hope you never have to use,
* but if you need it, you'll kiss me full on the lips for giving
* it to you!
*
* Modifications:
* - Join DBA_EXTENTS to DBA_DATA_FILES on the absolute file
* number (e.file_id = f.file_id). The previous join compared
* f.file_id (absolute) to e.relative_fno (relative), which
* mis-maps extents whenever the two numberings diverge.
*********************************************************************/
whenever oserror exit failure
whenever sqlerror exit failure
set pagesize 1000 linesize 500 trimspool on echo off feedback off timing off -
pause off verify off recsep off
break on owner
-- capture the lower-cased instance name into &&V_INSTANCE for the spool
-- file name (chr(0) padding stripped; thread parameter value 0 maps to 1)
col instance new_value V_INSTANCE noprint
select lower(replace(t.instance,chr(0),'')) instance
from v$thread t,
v$parameter p
where p.name = 'thread'
and t.thread# = to_number(decode(p.value,'0','1',p.value));
col seg format a30 heading "Owner.Name" word_wrap
col location format a43 heading "TableSpace:FileName" word_wrap
col exts format 990 heading "#Exts"
-- The statement below is intentionally left unterminated: it stays in the
-- SQL*Plus buffer and is only executed by the "/" after spooling starts,
-- so the whole report lands in the spool file.
select e.owner || '.' || e.segment_name ||
decode(e.partition_name,'','',' ('||e.partition_name||')') seg,
e.tablespace_name || ':' || f.file_name location,
count(distinct e.block_id) exts
from sys.dba_extents e,
sys.dba_data_files f
where e.segment_type in
('CLUSTER','LOBINDEX','LOBSEGMENT','TABLE','TABLE PARTITION')
and f.file_id = e.file_id
group by e.owner || '.' || e.segment_name ||
decode(e.partition_name,'','',' ('||e.partition_name||')'),
e.tablespace_name || ':' || f.file_name
order by 1, 2
set termout off
spool extmap_&&V_INSTANCE
/
exit success

56
vdh/filestat.sql Normal file
View File

@@ -0,0 +1,56 @@
/**********************************************************************
* File: filestat.sql
* Type: SQL*Plus script
* Author: Tim Gorman (Evergreen Database Technologies, Inc.)
* Date: 20-Sep-99
*
* Description:
* Report against the V$FILESTAT table to detect the tablespace
* with the greatest I/O load, according the volume of reads and
* writes, weighted against the average I/O time...
*
* Because this script depends on the timing information in the
* V$FILESTAT view, please be sure to have the configuration
* TIMED_STATISTICS set to TRUE to get the full value of this
* report...
*
* Modifications:
*********************************************************************/
-- Column formats for the report; sort0 is a hidden sort key.
col ts_name format a25 truncate
col sort0 noprint
col io format a43 heading "Reads Writes|Rqsts,Blks,#Bks/Rqst"
col rds format a25 heading "Reads|Rds/Bks(#bks/Rd)"
col wrts format a25 heading "Writes|Wrts/Bks(#bks/Wrt)"
col avgiotim format 999990.0
-- NOTE(review): "set echo on" looks unintended for a spooled report (the
-- sibling extmap.sql uses "echo off") -- confirm before relying on output.
set echo on feedback off timing off trimspool on pages 1000 lines 500
-- Capture the lower-cased instance name into &&V_INSTANCE for the spool
-- file name (chr(0) padding stripped; thread parameter value 0 maps to 1).
col instance new_value V_INSTANCE noprint
select lower(replace(t.instance,chr(0),'')) instance
from v$thread t,
v$parameter p
where p.name = 'thread'
and t.thread# = to_number(decode(p.value,'0','1',p.value));
spool filestat_&&V_INSTANCE
-- One row per tablespace: read/write request counts, block counts and
-- blocks-per-request, formatted into a single "io" column.  sort0 weights
-- total I/O volume by average I/O time so the busiest (and slowest)
-- tablespaces sort first; decode(...,0,1,...) guards the blocks-per-request
-- division against zero requests.
select avg(nvl(s.avgiotim,0)) * sum(nvl(s.phyrds,0) + nvl(s.phywrts,0)) sort0,
f.tablespace_name ts_name,
ltrim(to_char(sum(s.phyrds))) || ',' ||
ltrim(to_char(sum(s.phyblkrd))) || ',' ||
ltrim(to_char(sum(s.phyblkrd)/
decode(sum(s.phyrds),0,1,sum(s.phyrds)),'990.0'))
|| ' | ' ||
ltrim(to_char(sum(s.phywrts))) || ',' ||
ltrim(to_char(sum(s.phyblkwrt))) || ',' ||
ltrim(to_char(sum(s.phyblkwrt)/
decode(sum(s.phywrts),0,1,sum(s.phywrts)),'990.0')) io,
avg(s.avgiotim) avgiotim
from v$filestat s,
dba_data_files f
where f.file_id = s.file#
group by
f.tablespace_name
order by sort0 desc, ts_name
/
spool off

9
vdh/find_trace.sql Normal file
View File

@@ -0,0 +1,9 @@
-- Show this session's default trace file path from V$DIAG_INFO.
COLUMN tracefile_name FORMAT a120
SELECT value tracefile_name
FROM v$diag_info
WHERE name = 'Default Trace File'
;

157
vdh/findilos.sh Normal file
View File

@@ -0,0 +1,157 @@
#!/bin/bash
#
# findilos - Search a local network segment for iLOs
# The iLO is the Integrated Lights-Out management processor
# used on HP ProLiant and BladeSystem servers
#
scriptversion="1.0"
#
# Author: iggy@nachotech.com
#
# Website: http://blog.nachotech.com
#
# Requires: tr sed expr curl nmap
#
# Tested with: Nmap 4.20, curl 7.17.1, RHEL4
#
# Note: Discovery of an iLO is dependent upon the Virtual Media port
# being set to the default of 17988. If this has been changed
# by the iLO administrator, then this script will NOT find it.
#
# Also, if the iLO XML Reply Data Return has been Disabled by
# the iLO administrator, this script will not be able to
# gather any information about the server. It will still be
# discovered, but all you will see is its IP address.
#
# GLOBAL VARIABLES
scriptname="findilos"
iloips="/tmp/tmpilos.$$"  # scratch file: one discovered iLO IP address per line
iloxml="/tmp/tmpiloxml.$$"  # scratch file: raw XML fetched from each iLO
ilohwvers="/tmp/tmpilohwvers.$$"  # scratch file: ASIC-version -> iLO generation lookup table
# -i makes ilosfound an integer variable, so "ilosfound=$ilosfound+1"
# later in the script is evaluated arithmetically.
declare -i ilosfound=0
# parseiloxml TAG
# Extract the text between <TAG> and </TAG> from the XML saved in the
# global scratch file $iloxml, storing the result in the global
# $parsedstring ("N/A" when the tag is absent).
# Fixes: '[:print:]' is now quoted -- unquoted it is a bracket expression
# the shell may glob-expand against files in the current directory;
# $iloxml is quoted; deprecated fgrep replaced by grep -F; useless cat
# removed; backticks replaced by $( ).
function parseiloxml {
  if ! grep -F "$1" "$iloxml" > /dev/null 2>&1
  then
    # tag not found in xml output, return empty string
    parsedstring="N/A"
  else
    # tag was found - now we parse it from the output
    tempstring=$( tr -d -c '[:print:]' < "$iloxml" | sed "s/^.*<$1>//" | sed "s/<.$1.*//" )
    # trim off leading and trailing whitespace
    parsedstring=$( expr match "$tempstring" '[ \t]*\(.*[^ \t]\)[ \t]*$' )
  fi
}
# is_installed CMD
# Abort the whole script with exit status 255 when CMD is not available
# on PATH.  Uses the POSIX builtin "command -v" instead of the external,
# non-portable "which"; $1 is quoted against word splitting.
function is_installed {
  if ! command -v "$1" > /dev/null 2>&1
  then
    printf "\nERROR: %s not installed.\n\n" "$1"
    exit 255
  fi
}
# MAIN
# check for tools that we depend upon
is_installed tr
is_installed sed
is_installed expr
is_installed curl
is_installed nmap
# check syntax - should have 1 and only 1 parameter on cmdline
if [ $# -ne 1 ]; then
printf "%s %s ( http://blog.nachotech.com/ )\n" $scriptname $scriptversion
printf "Usage: %s {target network specification}\n" $scriptname
printf "TARGET NETWORK SPECIFICATION:\n"
printf " Can pass hostnames, IP addresses, networks, etc.\n"
printf " Ex: server1.company.com, company.com/24, 192.168.0.1/16, 10.0.0-255.1-254\n"
printf "EXAMPLE:\n"
printf " %s 16.32.64.0/22\n" $scriptname
exit 255
fi
# target network spec, passed verbatim to nmap
iprange=$1
# prepare lookup file for iLO hardware versions
# (data consumed later by grep on the HWRI string parsed from the XML)
cat > $ilohwvers << EOF
iLO-1 shows hw version ASIC: 2
iLO-2 shows hw version ASIC: 7
iLO-3 shows hw version ASIC: 8
iLO-3 shows hw version ASIC: 9
iLO-4 shows hw version ASIC: 12
iLO-5 shows hw version ASIC: 21
i-iLO shows hw version T0
EOF
#
# scan a range of IP addresses looking for an
# open tcp port 17988 (the iLO virtual media port)
#
# -n: no DNS; -P0: skip host discovery (old spelling of -Pn -- TODO confirm
# against the installed nmap); -sS: SYN scan (needs root); -oG -: greppable
# output on stdout, filtered down to just the IPs with the port open.
printf "Scanning..."
nmap -n -P0 -sS -p 17988 -oG - $iprange | fgrep /open/ | awk '{print $2}' > $iloips
printf "\n\n"
#
# open and read the list of IP addresses one at a time
#
# file descriptor 3 feeds the while-read loop below
exec 3< $iloips
echo "--------------- ------ -------- ------------ ------------------------- -----------------------------"
echo "iLO IP Address iLO HW iLO FW Server S/N Server Model License key"
echo "--------------- ------ -------- ------------ ------------------------- -----------------------------"
while read iloip <&3 ; do
# arithmetic increment: works because ilosfound was created with declare -i
ilosfound=$ilosfound+1
#
# attempt to read the xmldata from iLO, no password required
#
# both XML documents are appended into the same scratch file; parseiloxml
# then pulls individual tags out of it
curl --proxy "" --fail --silent --max-time 3 http://$iloip/xmldata?item=All > $iloxml
curl -sqk "http://$iloip/xmldata?item=CpqKey" >> $iloxml
#
# parse out the Server model (server product name)
# from the XML output
#
# each call sets the global $parsedstring ("N/A" when the tag is missing)
parseiloxml SPN; servermodel=$parsedstring
parseiloxml SBSN; sernum=$parsedstring
parseiloxml PN; ilotype=$parsedstring
parseiloxml FWRI; ilofirmware=$parsedstring
parseiloxml HWRI; ilohardware=$parsedstring
parseiloxml KEY; ilolicensekey=$parsedstring
# map the hardware revision string to an iLO generation via the lookup file
ilohwver=$(grep "$ilohardware" $ilohwvers|awk '{print $1}')
if [ "$ilohwver" == "" ]; then
ilohwver="N/A"
fi
if [ "$sernum" == "" ]; then
sernum="N/A"
fi
printf "%-15s %-6s %-8s %-12s %-25s %s\n" $iloip "$ilohwver" "$ilofirmware" "$sernum" "$servermodel" "$ilolicensekey"
done
printf "\n%d iLOs found on network target %s.\n\n" $ilosfound $iprange
# clean up all scratch files
rm -f $iloips $iloxml $ilohwvers
exit 0

View File

@@ -0,0 +1,13 @@
-- Summarise Fast Recovery Area usage from V$RECOVERY_FILE_DEST:
-- configured limit, used space, reclaimable space and effective free space
-- in MB, the free-space percentage, and the number of files in the FRA.
-- Fix: the percentage divided by space_limit directly, which raises
-- ORA-01476 (divisor is equal to zero) when no FRA limit is configured;
-- nullif() now yields NULL instead of an error in that case.
set linesize 120
column SL_MB format 999G999D99
column SU_MB format 999G999D99
column SR_MB format 999G999D99
column SF_MB format 999G999D99
column SF_PCT format 999D00
column nbr_files format 9G999G999G999
column name format a30
select name, space_limit/1024/1024 SL_MB, space_used/1024/1024 SU_MB, space_reclaimable/1024/1024 SR_MB,
(space_limit - space_used + space_reclaimable)/1024/1024 SF_MB,
(100 * (space_limit - space_used + space_reclaimable)/nullif(space_limit,0)) SF_PCT,
number_of_files nbr_files
from v$recovery_file_dest;

View File

@@ -0,0 +1,145 @@
o/*select fusg.file_type, decode(nvl2(ra.name, ra.space_limit, 0), 0, 0, round(nvl(fusg.space_used, 0)/ra.space_limit, 4) * 100),
decode(nvl2(ra.name, ra.space_limit, 0), 0, 0, round(nvl(fusg.space_reclaimable, 0)/ra.space_limit, 4) * 100),
nvl2(ra.name, fusg.number_of_files, 0)
SQL> descr v$flash_recovery_area_usage
Name Null? Type
----------------------------------------------------------------- -------- --------------------------------------------
FILE_TYPE VARCHAR2(12)
PERCENT_SPACE_USED NUMBER
PERCENT_SPACE_RECLAIMABLE NUMBER
NUMBER_OF_FILES NUMBER
*/
select fusg.file_type, decode(nvl2(ra.name, ra.space_limit, 0), 0, 0, nvl(ra.space_limit, 0))/1024/1024 space_limit_mb,
decode(nvl2(ra.name, ra.space_limit, 0), 0, 0, nvl(fusg.space_used, 0)) space_used,
decode(nvl2(ra.name, ra.space_limit, 0), 0, 0, nvl(fusg.space_reclaimable, 0)) space_reclaimable,
nvl2(ra.name, fusg.number_of_files, 0) number_of_files
from v$recovery_file_dest ra,
( select 'CONTROLFILE' file_type,
sum( case when ceilasm = 1 and name like '+%'
then ceil(((block_size*file_size_blks)+1)/1048576)*1048576
else block_size*file_size_blks
end
) space_used,
0 space_reclaimable, count(*) number_of_files
from v$controlfile,
( select /*+ no_merge*/ ceilasm
from x$krasga
)
where is_recovery_dest_file = 'YES'
union all
select 'ONLINELOG' file_type,
sum( case when ceilasm = 1 and member like '+%'
then ceil((l.bytes+1)/1048576)*1048576
else l.bytes
end
) space_used,
0 space_reclaimable, count(*) number_of_files
from ( select group#, bytes
from v$log
union
select group#, bytes
from v$standby_log
) l, v$logfile lf,
( select /*+ no_merge */ ceilasm
from x$krasga
)
where l.group# = lf.group#
and lf.is_recovery_dest_file = 'YES'
union all
select 'ARCHIVELOG' file_type,
sum(al.file_size) space_used,
sum( case when dl.rectype = 11
then al.file_size
else 0
end
) space_reclaimable,
count(*) number_of_files
from ( select recid,
case when ceilasm = 1 and name like '+%'
then ceil(((blocks*block_size)+1)/1048576)*1048576
else blocks * block_size
end file_size
from v$archived_log,
( select /*+ no_merge */ ceilasm
from x$krasga
)
where is_recovery_dest_file = 'YES'
and name is not null
) al,
x$kccagf dl
where al.recid = dl.recid(+)
and dl.rectype(+) = 11
union all
select 'BACKUPPIECE' file_type,
sum(bp.file_size) space_used,
sum ( case when dl.rectype = 13
then bp.file_size
else 0
end
) space_reclaimable,
count(*) number_of_files
from ( select recid,
case when ceilasm = 1 and handle like '+%'
then ceil((bytes+1)/1048576)*1048576
else bytes
end file_size
from v$backup_piece,
( select /*+ no_merge */ ceilasm
from x$krasga
)
where is_recovery_dest_file = 'YES'
and handle is not null
) bp,
x$kccagf dl
where bp.recid = dl.recid(+)
and dl.rectype(+) = 13
union all
select 'IMAGECOPY' file_type,
sum(dc.file_size) space_used,
sum( case when dl.rectype = 16
then dc.file_size
else 0 end
) space_reclaimable,
count(*) number_of_files
from ( select recid,
case when ceilasm = 1 and name like '+%'
then ceil(((blocks*block_size)+1)/1048576)*1048576
else blocks * block_size
end file_size
from v$datafile_copy,
( select /*+ no_merge */ ceilasm
from x$krasga
)
where is_recovery_dest_file = 'YES'
and name is not null
) dc,
x$kccagf dl
where dc.recid = dl.recid(+)
and dl.rectype(+) = 16
union all
select 'FLASHBACKLOG' file_type,
nvl(fl.space_used, 0) space_used,
nvl(fb.reclsiz, 0) space_reclaimable,
nvl(fl.number_of_files, 0) number_of_files
from ( select sum( case when ceilasm = 1 and name like '+%'
then ceil((fl.bytes+1)/1048576)*1048576
else bytes
end
) space_used,
count(*) number_of_files
from v$flashback_database_logfile fl,
( select /*+ no_merge */ ceilasm
from x$krasga
)
) fl,
( select sum(to_number(fblogreclsiz)) reclsiz
from x$krfblog
) fb
) fusg

Some files were not shown because too many files have changed in this diff Show More