Saturday, August 30, 2008

Copy an archivelog file from ASM to disk using RMAN

backup as copy archivelog from logseq=78 until logseq=78 format '/tmp/%t%r%s%p';

# or

backup as copy archivelog sequence between 22804 and 22804 thread 2 format '/tmp/22804.arc';

CATALOG ARCHIVELOG '/disk1/arch_logs/archive1_731.dbf',
'/tmp/archive1_78.dbf';

RAC node shutdown / startup

# shutdown services on a RAC node
sudo -u oracle /opt/oracle/product/10.2.0/crs/bin/srvctl stop instance -d p01cfd -i pl01cfd2 -o abort
sudo -u oracle /opt/oracle/product/10.2.0/crs/bin/srvctl stop asm -n t-prod-oranode-11
sudo -u oracle /opt/oracle/product/10.2.0/crs/bin/srvctl stop nodeapps -n t-prod-oranode-11


# startup services on a RAC node
sudo -u oracle /opt/oracle/product/10.2.0/crs/bin/srvctl start nodeapps -n t-prod-oranode-11
sudo -u oracle /opt/oracle/product/10.2.0/crs/bin/srvctl start asm -n t-prod-oranode-11
sudo -u oracle /opt/oracle/product/10.2.0/crs/bin/srvctl start instance -d p01cfd -i pl01cfd2

Thursday, August 28, 2008

What do filesystemio_options and disk_asynch_io Do?

From Metalink note 432854.1

disk_asynch_io is a kind of master switch which turns async I/O to database files on or off for any type of storage, whether raw device or filesystem. The filesystemio_options parameter gives finer control over I/O to database files on filesystems: it lets you turn off async I/O for filesystem files while keeping async I/O for raw devices, as long as the master switch disk_asynch_io is set to true.

Instance initialization parameter filesystemio_options has four options:
1. "asynch" : means buffered I/O + Async I/O
2. "directIO" : means Direct I/O only
3. "setall" : means Direct I/O + Async I/O
4. "none" : disables Async I/O and Direct I/O

One should always use at least Direct I/O with OCFS/OCFS2. In fact there is no choice: the database automatically adds that mode whenever it sees that a file is on an OCFS volume, or on an OCFS2 volume mounted with the datavolume mount option.
If you want async I/O with OCFS/OCFS2, use setall.
If you want async I/O with ASM/ASMLib, set filesystemio_options=setall and disk_asynch_io=true. ASM's I/O is transparent to the RDBMS, but these two parameters still affect RDBMS behaviour; see Note 413389.1.
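
A quick way to check how an instance is currently configured (plain v$parameter query):

select name, value from v$parameter
where name in ('filesystemio_options', 'disk_asynch_io');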

Wednesday, August 27, 2008

Oracle 10g proxy users

If you don't have the password of an account but need to connect to it to test or extract DDL, Oracle 10g allows you to connect as a proxy instead of resetting the password. This also lets you keep a single schema while allowing many named users to connect to it, with no need for grants, roles, or synonyms.

This also means that one named user can be a proxy for many different application schemas depending on the connect string.

# allow oradba to connect to bi_staging as a proxy
alter user bi_staging grant connect through oradba;

# connect to the bi_staging account using the oradba password
sqlplus oradba[bi_staging]/oradba_password

# check the environment
show user
select user from dual;
select sys_context('USERENV','PROXY_USER') from dual;

Using dbms_metadata.get_ddl

In Oracle 10g sys.link$ no longer contains a clear-text password, so to recreate a database link you need to use the encrypted password, e.g.

SET LONG 10000
SELECT dbms_metadata.get_ddl('DB_LINK', 'MY_LINK') FROM dual;

CREATE DATABASE LINK "MY_LINK"
CONNECT TO "TEST_ACCOUNT" IDENTIFIED BY VALUES '05E6F2E494A9AA49D4E0CA5F42300237FE'
USING 'TEST_DB';

Thursday, August 21, 2008

NLS_LANG diagnostics

select parameter, value from v$nls_parameters where parameter in ('NLS_CHARACTERSET','NLS_NCHAR_CHARACTERSET');

echo $NLS_LANG

# run csscan to check for issues when exporting/importing between databases with different character sets

CSSCAN SYSTEM/MANAGER FULL=y TOCHAR=AL32UTF8 ARRAY=102400 PROCESS=3

Wednesday, August 20, 2008

Transportable tablespace

# on the source database

ALTER TABLESPACE test READ ONLY;
select file_name from dba_data_files where tablespace_name = 'TEST';
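
Before the export it is worth confirming that the tablespace set is self-contained; a quick check using the standard DBMS_TTS package:

EXEC DBMS_TTS.TRANSPORT_SET_CHECK('TEST', TRUE);
SELECT * FROM transport_set_violations;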

exp
userid=\"sys/change_on_install@olddb as sysdba\"
transport_tablespace=y
tablespaces=test
triggers=y
constraints=y
grants=y
file=test.dmp

# copy test_01.dbf to the filesystem for the destination database, then put the source tablespace back in read-write mode

ALTER TABLESPACE test READ WRITE;

# on the destination database

imp
userid=\"sys/change_on_install@newdb as sysdba\"
transport_tablespace=y
datafiles='test_01.dbf'
file=test.dmp

ALTER TABLESPACE test READ WRITE;
select file_name from dba_data_files where tablespace_name = 'TEST';

External table to select from Oracle listener log

create directory LISTENER_LOG_DIR
as '/u01/app/oracle/10.1/db1/network/log'
/

create table listener_log
(
log_date date,
connect_string varchar2(300),
protocol_info varchar2(300),
action varchar2(15),
service_name varchar2(15),
return_code number(10)
)
organization external (
type oracle_loader
default directory LISTENER_LOG_DIR
access parameters
(
records delimited by newline
nobadfile
nologfile
nodiscardfile
fields terminated by "*" lrtrim
missing field values are null
(
log_date char(30) date_format
date mask "DD-MON-YYYY HH24:MI:SS",
connect_string,
protocol_info,
action,
service_name,
return_code
)
)
location ('listener_prolin01.log')
)
reject limit unlimited
/

External table to select from Oracle alert log

create or replace directory bdump
as '/u01/app/oracle/admin/ts01/bdump';

create table alert_log ( text varchar2(400) )
organization external (
type oracle_loader
default directory BDUMP
access parameters (
records delimited by newline
nobadfile
nodiscardfile
nologfile
)
location('alert_ts01.log')
)
reject limit unlimited
/

Tuesday, August 19, 2008

External table

create or replace directory test_dir as '/bie_oraexp';
grant read,write on directory test_dir to robert;

CREATE TABLE ext_test (id)
ORGANIZATION EXTERNAL
(TYPE oracle_datapump DEFAULT DIRECTORY test_dir LOCATION ('test_table.csv'))
as select * from table1;

UTL_FILE test

create or replace directory test_dir as '/bie_oraexp';
grant read,write on directory test_dir to robert;

create or replace procedure robert.utl_file_test_write (firstline in varchar2)
is
output_file utl_file.file_type;
begin
output_file := utl_file.fopen ('TEST_DIR','test_file.txt', 'W');
utl_file.put_line (output_file, firstline);
utl_file.fclose(output_file);
end;
/

Monday, August 18, 2008

Drop all Oracle users created in the last year

select 'drop user '||username||' cascade;' from dba_users where created > sysdate -365;
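
To turn the output into a runnable script, spool it first (standard sqlplus):

set pagesize 0 feedback off
spool /tmp/drop_users.sql
select 'drop user '||username||' cascade;' from dba_users where created > sysdate -365;
spool off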

Unusable indexes can't be skipped if they are a primary key.

create table test(id integer primary key, text varchar2(10));
create index text_index on test(text);
insert into test values (1,'test1');
alter table test move tablespace users;

SQL> show parameter skip_unusable_indexes

NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
skip_unusable_indexes boolean TRUE

SQL> select table_name, index_name, status from user_indexes;

TABLE_NAME INDEX_NAME STATUS
------------------------------ ------------------------------ --------
TEST SYS_C005223 UNUSABLE
TEST TEXT_INDEX UNUSABLE

SQL> select * from test where id = 1;

ID TEXT
---------- ----------
1 test1

SQL> update test set text = 'test2';

1 row updated.

SQL> update test set id = '2';
update test set id = '2'
*
ERROR at line 1:
ORA-01502: index 'ROBERT.SYS_C005223' or partition of such index is in unusable
state

Saturday, August 16, 2008

RMAN examples

# cold backup
rman
connect target /
connect catalog rcowner/password@rman
configure controlfile autobackup on;
startup mount;
run {
allocate channel disk1 device type disk format '/backup/rman_%d_%T_%s_%p';
backup as compressed backupset database;
}

# compressed backup
connect target /
RUN {
ALLOCATE CHANNEL disk1 DEVICE TYPE DISK FORMAT '/backup/rman_%d_%T_%s_%p';
BACKUP as compressed backupset database plus archivelog;
}

# point-in-time restore
# restore controlfile
rman
CONNECT TARGET /
connect catalog rcowner/password@rman
startup nomount;
set dbid 3298136637;
RUN
{
sql 'alter session set NLS_DATE_FORMAT="YYYY-MM-DD HH24:MI:SS"';
SET UNTIL TIME '2008-06-03 13:30:00';
ALLOCATE CHANNEL disk1 DEVICE TYPE DISK FORMAT '/backup/rman_%d_%T_%s_%p';
RESTORE CONTROLFILE;
}

# restore database
# exit rman and login again (to avoid ORA-01460)
rman
CONNECT TARGET /
connect catalog rcowner/password@rman
RUN
{
sql 'alter session set NLS_DATE_FORMAT="YYYY-MM-DD HH24:MI:SS"';
SET UNTIL TIME '2008-06-03 13:30:00';
ALLOCATE CHANNEL disk1 DEVICE TYPE DISK FORMAT '/backup/rman_%d_%T_%s_%p';
ALTER DATABASE MOUNT;
RESTORE DATABASE;
}

# recover database
RUN
{
sql 'alter session set NLS_DATE_FORMAT="YYYY-MM-DD HH24:MI:SS"';
SET UNTIL TIME '2008-06-03 14:00:00';
RECOVER DATABASE;
}

# open database
sqlplus '/ as sysdba'
ALTER DATABASE OPEN RESETLOGS;


# backup archivelogs for a specific day
backup archivelog from time 'SYSDATE-38' until time 'SYSDATE-37' filesperset 50 format '/arch1_%d_%u';

# backup recovery area
BACKUP RECOVERY AREA;

# Incremental backups

ALTER DATABASE ENABLE BLOCK CHANGE TRACKING USING FILE '/oracle/admin/test/bct.dbf';

SELECT * FROM V$BLOCK_CHANGE_TRACKING;

BACKUP DEVICE TYPE SBT INCREMENTAL FROM SCN 750923 DATABASE;
BACKUP INCREMENTAL FROM SCN 750923 DATABASE;
BACKUP DEVICE TYPE DISK INCREMENTAL FROM SCN 750983 DATABASE FORMAT '/tmp/incr_standby_%U';

BACKUP AS COMPRESSED BACKUPSET INCREMENTAL LEVEL 1 TAG = WEEKLY DATABASE PLUS ARCHIVELOG;

# rolling incremental backups

RUN {
RECOVER COPY OF DATABASE WITH TAG 'incr_backup' UNTIL TIME 'SYSDATE - 7';
BACKUP INCREMENTAL LEVEL 1 FOR RECOVER OF COPY WITH TAG 'incr_backup' DATABASE;
}

# Rename datafile

select 'set newname for datafile '||file_id||' to '''||file_name||''';' from dba_data_files;

Check for scheduled and running Discoverer jobs

# all jobs

SELECT F.USER_NAME||' '||R.BR_WORKBOOK_NAME||' '||R.BR_JOB_ID
FROM DISCOVR_APPS.EUL5_BATCH_REPORTS R, APPS.FND_USER F, DBA_JOBS J
WHERE F.USER_ID = SUBSTR(R.BR_CREATED_BY,2,4)
AND J.JOB = R.BR_JOB_ID
ORDER BY F.USER_NAME;

# running jobs

SELECT /*+ ordered */ J.SID, R.BR_JOB_ID, F.USER_NAME, R.BR_WORKBOOK_NAME
FROM DISCOVR_APPS.EUL5_BATCH_REPORTS R, APPS.FND_USER F, DBA_JOBS_RUNNING J
WHERE F.USER_ID = SUBSTR(R.BR_CREATED_BY,2,4)
AND J.JOB = R.BR_JOB_ID;

Profile Options changes to enable diagnostics

If you set the profile option "FND: Diagnostics" to "Yes", "Utilities: Diagnostics" to "Yes", and "Hide Diagnostics menu entry" to "No", the user of the form will have the ability to trace.
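
To check the current values from SQL*Plus you can use the standard fnd_profile API; the internal option names below are my best guess for those three profile options, so verify them in the Profiles form first:

select fnd_profile.value('FND_DIAGNOSTICS') fnd_diag,
fnd_profile.value('DIAGNOSTICS') util_diag,
fnd_profile.value('FND_HIDE_DIAGNOSTICS') hide_diag
from dual;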

Find the forms session holding a lock

SELECT to_char(start_time,'HH:MI'),SUBSTR(d.user_name,1,30) "User Name"
, a.pid
, b.sid
, b.serial#
FROM v$process a, v$session b, fnd_logins c, fnd_user d
WHERE a.pid = c.pid
AND d.user_id = c.user_id
AND a.addr = b.paddr
AND c.end_time IS NULL
and to_char(c.start_time,'DD-MON-YYYY') = '11-MAR-2007'
and b.sid = 28
order by start_time;

Shared Pool Usage

select 100-round(a.bytes/b.sm*100,2) pctused from
(select bytes from v$sgastat where name='free memory' AND pool='shared pool') a,
(select sum(bytes) sm from v$sgastat where pool = 'shared pool') b;

UNDO tablespace usage

# UNDO tablespace usage

select a.process, a.program, a.module, a.machine, b.USED_UREC, c.sql_text
from v$sql c, v$session a, v$transaction b
where b.addr = a.taddr
and a.sql_address = c.address
and a.sql_hash_value = c.hash_value
order by b.USED_UREC;

SELECT s.sid , s.username , t.used_ublk
FROM v$transaction t
, v$session s
WHERE 1 = 1
AND t.ses_addr = s.saddr;

column username format a15;
column segment_name format a15;
SELECT s.sid, s.username, t.used_ublk,
round((t.used_ublk*8)/1024) size_in_mb_8kb_block_size,
round((t.used_ublk*16)/1024) size_in_mb_16kb_block_size
FROM v$transaction t
, v$session s
WHERE 1 = 1
AND t.ses_addr = s.saddr;

SELECT distinct rpad(s.sid,3) "SID",S.USERNAME,
E.SEGMENT_NAME,
T.START_TIME "Start",
rpad(T.STATUS,9) "Status",
round((t.used_ublk*8)/1024) "Size(MB)"
--T.USED_UBLK||' Blocks and '||T.USED_UREC||' Records' "Rollback Usage"
FROM DBA_DATA_FILES DF,
DBA_EXTENTS E,
V$SESSION S,
V$TRANSACTION T
WHERE DF.TABLESPACE_NAME = E.TABLESPACE_NAME AND
DF.FILE_ID = UBAFIL AND
S.SADDR = T.SES_ADDR AND
T.UBABLK BETWEEN E.BLOCK_ID AND E.BLOCK_ID+E.BLOCKS AND
E.SEGMENT_TYPE in ('ROLLBACK','TYPE2 UNDO');

TEMP tablespace usage

#TEMP tablespace usage
column tablespace format a12
column username format a12
break on username nodup skip 1
select se.username
,se.sid
,su.extents
,su.blocks * to_number(rtrim(p.value)) as Space
,tablespace
,segtype
from v$sort_usage su
,v$parameter p
,v$session se
where p.name = 'db_block_size'
and su.session_addr = se.saddr
order by space , se.username, se.sid;

Coalesce qmn queues

# coalesce qmn queues

Select 'alter table applsys.aq$_'||queue_table||'_I coalesce;'
From dba_queues
Where owner = 'APPLSYS'
Union
Select 'alter table applsys.aq$_'||queue_table||'_T coalesce;'
From dba_queues
Where owner = 'APPLSYS'
Union
Select 'alter table applsys.aq$_'||queue_table||'_H coalesce;'
From dba_queues
Where owner = 'APPLSYS';

Check if a patch has been applied to Oracle Apps

select patch_level from fnd_product_installations where patch_level like '%FND%';
select patch_name from ad_applied_patches where patch_name in('5137963','4968815','4898130');
select bug_number from ad_bugs where bug_number in('5137963','4968815','4898130');

Test URLS for Oracle Apps 11i

# Test URLS after a clone or upgrade
http://server:8062/dev60cgi/f60cgi
http://server:8062/OA_HTML/US/ICXINDEX.htm
http://server:8062/pls/gfiinfd1/fnd_web.ping
http://server:8062/servlets/IsItWorking
http://server:8062/servlets/oracle.apps.icx.common.ConnectionTest
http://server:8062/OA_HTML/jsp/fnd/aoljtest.jsp
http://server:8062/servlets/oracle.apps.bne.framework.BneCaboTestServlet
http://server:8062/oa_servlets/BNETEST

# Oracle Applications Manager :-
http://server:8050/servlets/weboam/oam/oamLogin

# Diagnostics
http://server:8060/OA_HTML/jtfqalgn.htm

To speed up large patchsets or merge patches with adpatch

# To run a large merge patch in minimum time, turn off the unnecessary options (multiple compiles etc). After the patch has completed, you will need to run these steps manually using adadmin.

adpatch options=nogenform,nocompiledb,noautoconfig,nocompilejsp

# To run adpatch in silent mode, first edit adalldefaults.txt, then run patches as follows :-

adpatch defaultsfile=$APPL_TOP/admin/$TWO_TASK/adalldefaults.txt patchtop=/TARGZ2TEMP/test/FPKRUP1/5610862_JA workers=15 options=nogenform,nocompiledb,noautoconfig,nocompilejsp logfile=u5610862_JA.log

Use FNDCPASS to change passwords

# change the apps password
FNDCPASS apps/apps 0 Y system/manager SYSTEM APPLSYS newappspassword

# change non-apps passwords
select 'FNDCPASS apps/apps 0 Y system/manager ORACLE '||oracle_username||' '||'newpassword;' from fnd_oracle_userid
where oracle_username not in ('APPS','APPS_MRC','APPLSYS','APPLSYSPUB','CTXSYS','EDWREP','PORTAL30_SSO','PORTAL30');

# change a user password
FNDCPASS apps/apps 0 Y system/manager USER sysadmin newpassword

java.lang.NoClassDefFoundError: oracle/apps/jtf/cache/CacheManager

1. Edit $APPL_TOP/admin/adjborg2.txt and comment out the line which has the reference to cache.jar or cache.zip.
2. Run adadmin Regenerate JAR Files.

Kill all unix processes matching a string

ps -ef |grep FNDLIBR | grep -v grep | awk '{print $2}' | xargs kill -9
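
On systems that have pkill, the same thing is a single command (-f matches against the full command line):

pkill -9 -f FNDLIBR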

WFRESGEN

WFRESGEN apps/apps 0 Y DATABASE /d01/app/applmgr/product/r11i/fnd/11.0.28/resource/US/wfcfg_ANZINTE.msg

Check for datafile corruption with dbverify

dbv FILE=/ghrqa2-ds1/ora10/oradata/ghrqa2/applsysd11.dbf BLOCKSIZE=8192

Manually compile a form using f45gen

f45gen userid=apps/apps module=ZGLX_ACCINQUIRY_US.fmb

Use ar25run to test a report

ar25run userid=apps/apps batch=yes destype=file desname=test.out desformat=HPW.prt \
report=/db11/applmgr/product/10.7_sgtst/mlapr/1.0.0/srw/MLFA0018.rdf \
P_BOOK='ML SGD FA BOOK' \
P_PERIOD1='JAN-2003' \
P_PERIOD2='FEB-2003' \
P_REPORT_TYPE='COST' \
P_ENTITY='2770' \
P_GL_ACCOUNT='1311600094' \
P_FR_COSTCTR='785087' \
P_TO_COSTCTR='785Z36'

Run a script across multiple databases

for dbservername in `cat /tmp/ora_server.lst`
do
echo `date '+%b %d %Y %T'` Checking $dbservername for something for Robert
$ORACLE_HOME/bin/sqlplus -s username/password@$dbservername @scanner.sql >> /tmp/robert_$dbservername
done

dbms_jobs

# check the status of jobs
select broken, failures from dba_jobs;

# execute a job
execute dbms_job.run (1);

# set jobs to broken so they don't run again
select 'execute dbms_job.broken('||job||',TRUE,'''||sysdate||''');' from dba_jobs where log_user = 'APPS' and broken = 'N';

Load a new workflow

WFLOAD apps/apps 0 Y UPLOAD FGIFA_ACCGEN_WF.wft

Find Unsuccessful Logins in the last 24 hours

select fu.user_name||' '||to_char(ful.attempt_time,'DD-MON-YYYY:HH24:MI')||' '||ful.login_name||' '||ful.terminal_id
from fnd_user fu, FND_UNSUCCESSFUL_LOGINS ful
where ful.user_id = fu.user_id (+)
and ful.attempt_time > sysdate - 1;

Use rcp to copy a directory

rcp -rp applmgr@server1:/db02/applmgr/product/10.7_hkpro ./

Use RSYNC to synchronise two directories

# First setup ssh keys to allow login with no password

rsync -avz -e ssh --delete xxx@server1:/xxx/ /xxx/

Send a test email from Unix

echo "This is a test" | /usr/ucb/mail -s "Test Email" test@test.com

Get explain plan for select statement

explain plan for select * from gl_balances order by period_name;
select plan_table_output from table(dbms_xplan.display('plan_table'));

or

set autotrace on statistics;
select x from y;

or

alter session set timed_statistics=true;
alter session set max_dump_file_size=unlimited;
exec sys.dbms_support.start_trace(waits=>true, binds=>true);
-- run the code to be traced here
exec sys.dbms_support.stop_trace;

# turn on trace for a running session

select 'exec sys.dbms_system.set_ev('||sid||','||serial#||',10046,12,'''');' from v$session where sid in ('75');

begin
sys.dbms_system.set_sql_trace_in_session(165, 31125, TRUE);
end;
/

tkprof sambprd_ora_29857.trc gps.tkp explain=username/password

ORADEBUG SETOSPID 21237
ORADEBUG EVENT 10046 TRACE NAME CONTEXT FOREVER, LEVEL 4

Identify concurrent requests using the most system resource.

select fcp.USER_CONCURRENT_PROGRAM_NAME||'^'||count(*)||'^'||trunc(sum(actual_completion_date - actual_start_date)*24)
from fnd_concurrent_programs_tl fcp,
fnd_concurrent_requests fcr
where fcp.concurrent_program_id = fcr.concurrent_program_id
and fcp.language = 'US'
group by fcp.USER_CONCURRENT_PROGRAM_NAME
order by sum(actual_completion_date - actual_start_date)*24 desc;

Identify concurrent requests using a specific printer.

select
to_char(fcr.actual_completion_date,'dd-mon-yyyy:hh24:mi')||' '||
fcr.request_id||' '||
fu.user_name||' '||
fcp.user_concurrent_program_name
from fnd_concurrent_requests fcr, fnd_concurrent_programs_tl fcp, fnd_user fu
where fcr.concurrent_program_id = fcp.concurrent_program_id
and fcr.requested_by = fu.user_id
and fcp.language = 'US'
and fcr.number_of_copies > 0
and upper('hkfinp09') like upper(fcr.printer)
and sysdate - fcr.actual_completion_date < 30
order by fcr.actual_completion_date;

Create a public database link

create public database link xxx connect to user identified by password using 'xxx';

Adpatch hotpatch

Apply a small patch without the need to enable maintenance mode.

adpatch options=hotpatch

tar and gzip via a unix pipe to save space

# tar and gzip via a unix pipe
mknod mypipe p
gzip -c < mypipe > 9.2.0.tar.gz &
tar cvf mypipe 9.2.0

or

tar cf - gfiprd1appl | gzip -1 -c > gfiprd1appl.tar.gz

# gunzip and untar in one command via a unix pipe
gunzip < 9.2.0.tar.gz | tar xvf -

or

gzip -dc 920.tar.gz | tar -xvf -

Friday, August 15, 2008

ASM diagnostics statements.

SELECT GROUP_NUMBER, DISK_NUMBER, TOTAL_MB/1024 total_gb, FREE_MB/1024 free_gb, NAME FROM v$asm_disk;

select group_number, name, total_mb/1024 total_gb, free_mb/1024 free_gb
from v$asm_diskgroup;

select type, sum(bytes) from v$asm_file group by type;

Rollback a patch with opatch

PATH=$PATH:$ORACLE_HOME/OPatch
opatch lsinventory
opatch rollback -id 6646853 -all_nodes
opatch lsinventory

create an spfile on ASM from a pfile in the OS

create spfile='+DISK/spfile' from pfile='/private/init.ora';

Reclaim space by purging the DBA Recycle Bin

SQL> select count(*), sum(space) from dba_recyclebin;

COUNT(*) SUM(SPACE)
---------- ----------
187 85504

purge dba_recyclebin;


# Flashback a dropped table.

select type, object_name, original_name from recyclebin;


TYPE OBJECT_NAME
------------------------- ------------------------------
ORIGINAL_NAME
--------------------------------
TABLE BIN$X/qPdSN0kATgQNwKilpFfQ==$0
X

TABLE BIN$YRYsJpVJzxngQNwKilpz2Q==$0
ADDRESS

INDEX BIN$YRYsJpVIzxngQNwKilpz2Q==$0
IDX_ADDR_DELETED

INDEX BIN$YRYsJpVHzxngQNwKilpz2Q==$0
IDX_ADDR_CITYSTATE

INDEX BIN$YRYsJpVGzxngQNwKilpz2Q==$0
IDX_ADDR_ADDRESS

INDEX BIN$YRYsJpVFzxngQNwKilpz2Q==$0
IDX_ADDR_ADDRESSID


flashback table address to before drop;

Using kfed to debug raw devices used in ASM.

Login as oracle, set the environment to ASM ORACLE_HOME, and compile the kfed program.

$ cd $ORACLE_HOME/rdbms/lib
$ make -f ins_rdbms.mk ikfed

kfed read /dev/asm_disk501 text=/tmp/disk501

Test the impact of NOLOGGING on an Oracle Dataguard Standby

Recommended practice for Dataguard Standby databases is to run "alter database force logging;" to ensure all transactions on the primary database are logged and applied to the standby.

Following is a basic test with diagnostics to confirm the redo volume reduction, the unrecoverable transactions, and the impact on Dataguard standby databases.

Basic results indicate :-
1. significant reduction in redo (there will still be UNDO REDO generated)
2. logical corruption in the tables in the standby database, which will stop select statements until the table data is reloaded (e.g. drop and recreate)

NOLOGGING is valid for the following operations :-

direct load (SQL*Loader)
direct load INSERT (using APPEND hint)
CREATE TABLE ... AS SELECT
CREATE INDEX
ALTER TABLE ... MOVE PARTITION
ALTER TABLE ... SPLIT PARTITION
ALTER INDEX ... SPLIT PARTITION
ALTER INDEX ... REBUILD
ALTER INDEX ... REBUILD PARTITION
INSERT, UPDATE, and DELETE on LOBs in NOCACHE NOLOGGING mode stored out of line

# On the primary database
# check logging

select force_logging from v$database;
select tablespace_name from dba_tablespaces where logging = 'NOLOGGING';
select logging from user_tables where table_name = 'TEST';

SQL> truncate table test;
Table truncated.
SQL> alter database force logging;
Database altered.
SQL> alter tablespace users logging;
Tablespace altered.
SQL> alter table test logging;
Table altered.
SQL> set autotrace ON statistics
SQL> insert /*+ APPEND */
  2  into test select 'this is a nologging test' from dual nologging;

1 row created.


Statistics
----------------------------------------------------------
165 recursive calls
7 db block gets
21 consistent gets
2 physical reads
8624 redo size
820 bytes sent via SQL*Net to client
780 bytes received via SQL*Net from client
3 SQL*Net roundtrips to/from client
6 sorts (memory)
0 sorts (disk)
1 rows processed

SQL> set autotrace off
SQL> select file#, unrecoverable_change#, to_char(unrecoverable_time,'hh:mi') from v$datafile where UNRECOVERABLE_CHANGE# > 0;

FILE# UNRECOVERABLE_CHANGE# TO_CH
---------- --------------------- -----
26 2.4329E+10 02:48

SQL> alter database no force logging;

Database altered.

SQL> alter tablespace users nologging;

Tablespace altered.

SQL> alter table test nologging;

Table altered.

SQL> truncate table test;

Table truncated.

SQL> set autotrace ON statistics
SQL> insert /*+ APPEND */
  2  into test select 'this is a nologging test' from dual nologging;

1 row created.


Statistics
----------------------------------------------------------
3 recursive calls
7 db block gets
2 consistent gets
2 physical reads
448 redo size
820 bytes sent via SQL*Net to client
780 bytes received via SQL*Net from client
3 SQL*Net roundtrips to/from client
2 sorts (memory)
0 sorts (disk)
1 rows processed

SQL> select file#, unrecoverable_change#, to_char(unrecoverable_time,'hh:mi') from v$datafile where UNRECOVERABLE_CHANGE# > 0;

FILE# UNRECOVERABLE_CHANGE# TO_CH
---------- --------------------- -----
26 2.4329E+10 02:51



# On the Dataguard standby

SQL> select * from test;
ERROR:
ORA-01578: ORACLE data block corrupted (file # 26, block # 42)
ORA-01110: data file 26:
'/sltbko1/oradata/SLTBKO1/datafile/o1_mf_users_3vty8wdy_.dbf'
ORA-26040: Data block was loaded using the NOLOGGING option

SQL> analyze table test validate structure cascade;
analyze table test validate structure cascade
*
ERROR at line 1:
ORA-01578: ORACLE data block corrupted (file # 26, block # 31)
ORA-01110: data file 26:
'/sltbko1/oradata/SLTBKO1/datafile/o1_mf_users_3vty8wdy_.dbf'
ORA-26040: Data block was loaded using the NOLOGGING option

!oerr ora 26040
26040, 00000, "Data block was loaded using the NOLOGGING option\n"
//* Cause: Trying to access data in block that was loaded without
//* redo generation using the NOLOGGING/UNRECOVERABLE option
//* Action: Drop the object containing the block.

How to suspend and resume a running Oracle process

SQL> select spid, pid from v$process where addr = (select paddr from v$session where sid = 1871);

SPID PID
------------ ----------
729304 32

SQL> oradebug setospid 729304
Oracle pid: 32, Unix process pid: 729304, image: oracle@p-test-oranode-30 (TNS V1-V3)
SQL> oradebug suspend
Statement processed.
SQL> oradebug resume
Statement processed.

Connect to a blocked instance using UR=A

ASM1 =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = [hostname])(PORT = [portN]))
(CONNECT_DATA =
(SERVER = DEDICATED)(SERVICE_NAME = +ASM)(INSTANCE_NAME = +ASM1)
(UR=A)
)
)
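
With the (UR=A) clause in the alias, a connection is let through even when the listener reports the instance as blocked (e.g. started nomount or in restricted mode):

sqlplus sys/password@ASM1 as sysdba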

Clone ORACLE_HOME

For a single-node Oracle install it is usually faster to clone the ORACLE_HOME than to re-install and re-apply patches.
Cloning can be done using OEM, or manually using clone.pl or runInstaller.
Cloning guarantees that you have identical components installed on DEV, QA, PROD.
You can only clone between the same OS - e.g. from AIX to AIX.
It is possible to clone the ASM and DB ORACLE_HOME but not the OEM agent.

The process I followed to manually clone on AIX is as follows. You can find more information in Metalink Note 565009.1.

#1. prepare for cloning (this step currently does not do anything useful but it may be necessary in the future)
cd $ORACLE_HOME/clone/bin
chmod u+x prepare_clone.pl
vi prepare_clone.pl
change /usr/local/bin/perl to /usr/bin/perl
./prepare_clone.pl

#2. copy all files under $ORACLE_HOME to the destination directory or destination server (e.g. use tar/ftp or scp).

#3. run the clone process (create new inventory or register the ORACLE_HOME with existing inventory, relink binaries etc)
# check the contents of /etc/oraInst.loc
more /etc/oraInst.loc
cd $ORACLE_HOME/clone/bin
chmod u+x clone.pl
vi clone.pl
change /usr/local/bin/perl to /usr/bin/perl
# example clone the ASM ORACLE_HOME (clone.pl calls oui)
perl clone.pl ORACLE_HOME="/opt/oracle/product/10.2.0/asm" ORACLE_HOME_NAME="ASM_HOME"
# example clone the DB ORACLE_HOME
perl clone.pl ORACLE_HOME="/opt/oracle/product/10.2.0/db_1" ORACLE_HOME_NAME="db10g_home1"
# check the inventory to confirm the ORACLE_HOME is registered
more /opt/oracle/product/oraInventory/ContentsXML/inventory.xml

#4. ask unix team to run root.sh in each ORACLE_HOME

#5. for the ASM ORACLE_HOME ask the unix team to run the following as root to setup the cluster software
cd $ORACLE_HOME/bin
localconfig delete
localconfig add

# if you have problems cloning with clone.pl you can directly call runInstaller but you do need an Xterm or VNC GUI session.

./runInstaller -clone ORACLE_HOME="/opt/oracle/product/10.2.0/owb" ORACLE_HOME_NAME="owb10g_home1"

or

$ORACLE_HOME/oui/bin/runInstaller -clone -silent -invPtrLoc $ORACLE_HOME/oraInst.loc ORACLE_HOME="$ORACLE_HOME" ORACLE_HOME_NAME="ORA10G2" -ignoreSysPrereqs | tee -a $LOG

The ultimate Oracle performance tuning parameter.

# I found this one the other day. It is useful to speed up imports, but make sure you have a good backup (or an updated CV) before you use it.

_disable_logging = true

# data is still written to the log buffer, but is not written to the redo logs.
# if the instance dies (crash or shutdown abort) there is no data in the online redo logs to do instance recovery.
# DO NOT USE THIS UNLESS YOU UNDERSTAND THE IMPACT !!!

Oracle profiles and password verification

Rem
Rem $Header: utlpwdmg.sql 31-aug-2000.11:00:47 nireland Exp $
Rem
Rem utlpwdmg.sql
Rem
Rem Copyright (c) Oracle Corporation 1996, 2000. All Rights Reserved.
Rem
Rem NAME
Rem utlpwdmg.sql - script for Default Password Resource Limits
Rem
Rem DESCRIPTION
Rem This is a script for enabling the password management features
Rem by setting the default password resource limits.
Rem
Rem NOTES
Rem This file contains a function for minimum checking of password
Rem complexity. This is more of a sample function that the customer
Rem can use to develop the function for actual complexity checks that the
Rem customer wants to make on the new password.
Rem
Rem MODIFIED (MM/DD/YY)
Rem nireland 08/31/00 - Improve check for username=password. #1390553
Rem nireland 06/28/00 - Fix null old password test. #1341892
Rem asurpur 04/17/97 - Fix for bug479763
Rem asurpur 12/12/96 - Changing the name of password_verify_function
Rem asurpur 05/30/96 - New script for default password management
Rem asurpur 05/30/96 - Created
Rem

-- This script sets the default password resource parameters
-- This script needs to be run to enable the password features.
-- However the default resource parameters can be changed based
-- on the need.
-- A default password complexity function is also provided.
-- This function makes the minimum complexity checks like
-- the minimum length of the password, password not same as the
-- username, etc. The user may enhance this function according to
-- the need.
-- This function must be created in SYS schema.
-- connect sys/ as sysdba before running the script

CREATE OR REPLACE FUNCTION verify_function
(username varchar2,
password varchar2,
old_password varchar2)
RETURN boolean IS
n boolean;
m integer;
differ integer;
isdigit boolean;
ischar boolean;
ispunct boolean;
digitarray varchar2(20);
punctarray varchar2(25);
chararray varchar2(52);

BEGIN
digitarray:= '0123456789';
chararray:= 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ';
punctarray:='!"#$%&()``*+,-/:;<=>?_';

-- Check if the password is same as the username
IF NLS_LOWER(password) = NLS_LOWER(username) THEN
raise_application_error(-20001, 'Password same as or similar to user');
END IF;

-- Check for the minimum length of the password
IF length(password) < 4 THEN
raise_application_error(-20002, 'Password length less than 4');
END IF;

-- Check if the password is too simple. A dictionary of words may be
-- maintained and a check may be made so as not to allow the words
-- that are too simple for the password.
IF NLS_LOWER(password) IN ('welcome', 'database', 'account', 'user', 'password', 'oracle', 'computer', 'abcd') THEN
raise_application_error(-20002, 'Password too simple');
END IF;

-- Check if the password contains at least one letter, one digit and one
-- punctuation mark.
-- 1. Check for the digit
isdigit:=FALSE;
m := length(password);
FOR i IN 1..10 LOOP
FOR j IN 1..m LOOP
IF substr(password,j,1) = substr(digitarray,i,1) THEN
isdigit:=TRUE;
GOTO findchar;
END IF;
END LOOP;
END LOOP;
IF isdigit = FALSE THEN
raise_application_error(-20003, 'Password should contain at least one digit, one character and one punctuation');
END IF;
-- 2. Check for the character
<<findchar>>
ischar:=FALSE;
FOR i IN 1..length(chararray) LOOP
FOR j IN 1..m LOOP
IF substr(password,j,1) = substr(chararray,i,1) THEN
ischar:=TRUE;
GOTO findpunct;
END IF;
END LOOP;
END LOOP;
IF ischar = FALSE THEN
raise_application_error(-20003, 'Password should contain at least one digit, one character and one punctuation');
END IF;
-- 3. Check for the punctuation
<<findpunct>>
ispunct:=FALSE;
FOR i IN 1..length(punctarray) LOOP
FOR j IN 1..m LOOP
IF substr(password,j,1) = substr(punctarray,i,1) THEN
ispunct:=TRUE;
GOTO endsearch;
END IF;
END LOOP;
END LOOP;
IF ispunct = FALSE THEN
raise_application_error(-20003, 'Password should contain at least one digit, one character and one punctuation');
END IF;

<<endsearch>>
-- Check if the password differs from the previous password by at least
-- 3 letters
IF old_password IS NOT NULL THEN
differ := length(old_password) - length(password);

IF abs(differ) < 3 THEN
IF length(password) < length(old_password) THEN
m := length(password);
ELSE
m := length(old_password);
END IF;

differ := abs(differ);
FOR i IN 1..m LOOP
IF substr(password,i,1) != substr(old_password,i,1) THEN
differ := differ + 1;
END IF;
END LOOP;

IF differ < 3 THEN
raise_application_error(-20004, 'Password should differ by at least 3 characters');
END IF;
END IF;
END IF;
-- Everything is fine; return TRUE ;
RETURN(TRUE);
END;
/

-- This script alters the default parameters for Password Management
-- This means that all the users on the system have Password Management
-- enabled and set to the following values unless another profile is
-- created with parameter values set to different value or UNLIMITED
-- is created and assigned to the user.

ALTER PROFILE DEFAULT LIMIT
PASSWORD_LIFE_TIME 60
PASSWORD_GRACE_TIME 10
PASSWORD_REUSE_TIME 1800
PASSWORD_REUSE_MAX UNLIMITED
FAILED_LOGIN_ATTEMPTS 3
PASSWORD_LOCK_TIME 1/1440
PASSWORD_VERIFY_FUNCTION verify_function;

Finding orphan mview log registrations

# If someone creates a fast-refresh mview against this database, it registers in the primary database's mview log, and the log keeps all changes until that mview's next refresh. If the destination database is deleted, the primary database's mview log will keep growing. The following checks for mview log registrations that have not refreshed recently.

select * from SYS.SLOG$;
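
A more targeted check, assuming the usual SYS.SLOG$ columns (verify against your version's definition), lists registrations that have not refreshed for a month:

select mowner, master, snapid, snaptime
from sys.slog$
where snaptime < sysdate - 30
order by snaptime;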

Copy archivelogs from ASM to OS

# To copy archivelogs from ASM to OS using rman :-

rman
connect target /
backup as copy archivelog from logseq=78 until logseq=78 format '/tmp/%t%r%s%p';

# To copy archivelogs from ASM to OS using DBMS_FILE_TRANSFER (can copy to other servers using database link) :-

CREATE DIRECTORY archdir AS '+DG1/orcl/arch';
CREATE DIRECTORY tempdir AS '/archivelog';
set serverout on
DECLARE
v_archivedir VARCHAR2(30) := 'ARCHDIR';
v_tempdir VARCHAR2(30) := 'TEMPDIR';
v_asm_logname VARCHAR2(100);
v_unix_logname VARCHAR2(100);
v_first_log_seq NUMBER := 12;
v_last_log_seq NUMBER := 14;
v_log_seq VARCHAR2(5);
CURSOR c_logs IS
SELECT name
FROM v$archived_log
WHERE sequence# BETWEEN v_first_log_seq AND v_last_log_seq
ORDER BY sequence#;
BEGIN
FOR i IN c_logs LOOP
v_asm_logname := SUBSTR(i.name, 16);
v_log_seq := SUBSTR(v_asm_logname,4,5);
v_unix_logname := 'orcl_arc'||v_log_seq||'.log';
DBMS_FILE_TRANSFER.COPY_FILE(v_archivedir,
v_asm_logname,
v_tempdir,
v_unix_logname);
DBMS_OUTPUT.PUT_LINE(v_asm_logname||' copied to '||v_unix_logname||'.');
END LOOP;
END;
/

Catch system errors with an After Servererror trigger

create table t ( msg varchar2(4000) );

create or replace trigger snapshot_too_old
after servererror on database
declare
l_sql_text ora_name_list_t;
l_n number;
begin
if ( is_servererror(1555) )
then
insert into t values ( 'ora_sysevent = ' || ora_sysevent );
insert into t values ( 'ora_login_user = ' || ora_login_user );
insert into t values ( 'ora_server_error = ' || ora_server_error(1) );

l_n := ora_sql_txt( l_sql_text );
for i in 1 .. l_n
loop
insert into t values ( 'l_sql_text(' || i || ') = ' || l_sql_text(i) );
end loop;
end if;
end;
/

Auditing the program logging into Oracle

# unfortunately Oracle Audit (aud$) does not record the program logging in. A login trigger can help with this.

create table
stats$user_log
(
user_id varchar2(30),
session_id number(8),
host varchar2(30),
last_program varchar2(48),
last_action varchar2(32),
last_module varchar2(32),
logon_day date,
logon_time varchar2(10),
logoff_day date,
logoff_time varchar2(10),
elapsed_minutes number(8)
)
;

# for all users

create or replace trigger
logon_audit_trigger
AFTER LOGON ON DATABASE
BEGIN
insert into stats$user_log values(
user,
sys_context('USERENV','SESSIONID'),
sys_context('USERENV','HOST'),
null,
null,
null,
sysdate,
to_char(sysdate, 'hh24:mi:ss'),
null,
null,
null
);
END;
/

# or for a single user

CREATE OR REPLACE TRIGGER "LOGON_AUDIT_TRIGGER" AFTER
LOGON ON DATABASE
DECLARE
sess number(10);
prog varchar2(70);
BEGIN
IF user = 'MYUSERNAME' THEN
sess := sys_context('USERENV','SESSIONID');
SELECT program INTO prog FROM v$session WHERE audsid = sess
and rownum <= 1;
-- explicit column list so the insert matches the stats$user_log table above
INSERT INTO stats$user_log
(user_id, session_id, host, last_program, logon_day, logon_time)
VALUES (
user,
sess,
sys_context('USERENV','HOST'),
prog,
sysdate,
to_char(sysdate, 'hh24:mi:ss'));
END IF;
END;
/

Run CSSCAN to check Oracle characterset conversion

csscan SYSTEM/MANAGER FULL=y TOCHAR=UTF8 ARRAY=102400 PROCESS=3

Audit an Oracle user with a Logon Trigger

CREATE OR REPLACE TRIGGER sadmin_logon_trg
AFTER LOGON ON DATABASE
WHEN (user = 'SCOTT') -- remove this line to turn it on for all users.
BEGIN
-- audit code here
END;
/

Enable Oracle Flashback

# adjust db_recovery_file_dest_size depending on space and expected archive / flashback volume
alter system set db_recovery_file_dest_size=300G;

# change db_recovery_file_dest depending on which ASM disk group will be used
alter system set db_recovery_file_dest='+ASM_ORADATA12';

# keep 1 hour of flashback logs
alter system set db_flashback_retention_target = 60;

# enable flashback
shutdown immediate;
startup mount exclusive;
alter database flashback on;
alter database open;

# and some basic diagnostics scripts
SELECT object_type, message_type, message_level,reason, suggested_action
FROM dba_outstanding_alerts;

select * from v$flash_recovery_area_usage;

select * FROM V$RECOVERY_FILE_DEST;

Using COMMIT_WRITE=BATCH,NOWAIT

# if you have databases where performance is more important than recoverability (e.g. volatile pricing systems), then commit throughput can be greatly increased using COMMIT_WRITE.

# DO NOT USE THIS UNLESS YOU UNDERSTAND THE IMPACT !!!!

COMMIT_WRITE=BATCH,NOWAIT
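
The same behaviour can also be chosen per session or per commit with standard 10gR2 syntax:

alter session set commit_write = 'BATCH,NOWAIT';
commit write batch nowait;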

Compressed Oracle Backup can be faster than Uncompressed

I ran a quick test to compare runtime and size of compressed and uncompressed backups.

RUN {
ALLOCATE CHANNEL disk1 DEVICE TYPE DISK FORMAT '/backup/rman_%d_%T_%s_%p';
BACKUP DATABASE;
}

RUN {
ALLOCATE CHANNEL disk1 DEVICE TYPE DISK FORMAT '/backup/rman_%d_%T_%s_%p';
BACKUP as compressed backupset DATABASE;
}

File Type            File Size   Backup Time
Datafile on disk     47.6GB
Uncompressed Backup  17.1GB      25 minutes 25 seconds
Compressed Backup    3.0GB       16 minutes 26 seconds

For this test the compressed backup size was only 6% of the uncompressed datafiles, and 17% of the uncompressed backup.
Runtime for compressed backup was only 65% of runtime for uncompressed backup.

The compression ratio, and runtime will vary depending on database size, data density, and IO throughput. I have had similar improvements using Tape backup.

This means that compressed rman backup can :-
1. save significant tape space
2. reduce backup times
3. reduce backup failures.
4. reduce disk space needed for archivelogs
5. speed up refreshes of DEV environments

Auditing selects on a table in Oracle - including FGA

# 1. Audit all selects by a user.

alter system set audit_trail='db,extended';
audit select by robert;

# 2. Audit all selects on a table.

audit select on gateway.userhome by session;

3. Use FGA. Only audit selects on a specified table by a specified user when they touch a specified column. This checks the audit_condition for every select, so it has a small performance impact even when it does not write an audit record.

begin
dbms_fga.add_policy (
object_schema => 'ROBERT',
object_name => 'TEST_TABLE',
policy_name => 'TEST_FGA',
audit_column => 'SECRET',
statement_types => 'SELECT',
audit_condition => 'sys_context(''USERENV'', ''SESSION_USER'') = ''ROBERT''');
end;
/
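
Once the policy is in place, the captured statements can be read back from the standard FGA audit view:

select timestamp, db_user, object_name, sql_text from dba_fga_audit_trail order by timestamp;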

Check which Oracle objects are dependent on a database link

select name, type, owner
from all_dependencies
where referenced_link_name = 'BKO_OPE';

To install Java in Oracle 10g

# a cut-down version of Metalink Note 276554.1

cd $ORACLE_HOME/rdbms/admin
sqlplus '/ as sysdba'
@utlrp
spool java.log
set pagesize 100
select COMP_NAME, status from dba_registry;
select object_type||' '||owner||'.'||object_name from dba_objects where status = 'INVALID' order by object_type, owner;
alter system set java_pool_size = 150M SID='tl01dwh1';
@?/javavm/install/initjvm.sql
@?/xdk/admin/initxml.sql
@?/xdk/admin/xmlja.sql
@?/rdbms/admin/catjava.sql
@?/rdbms/admin/catexf.sql
@?/rdbms/admin/utlrp.sql
select object_type||' '||owner||'.'||object_name from dba_objects where status = 'INVALID' order by object_type, owner;
select COMP_NAME, status from dba_registry;
spool off

Open a Oracle Dataguard physical standby read-only

# The following process can be used to open a physical Dataguard standby read-only, to run reports or to check standby database status.
# The only difference after changing from ARCH to LGWR transport is that we can recover from the standby redo log in near real-time rather than from the archivelogs; this is detailed in step 6.

1. On the primary database defer log shipping (be careful that you defer the correct destination)
ALTER SYSTEM SET LOG_ARCHIVE_DEST_STATE_2=DEFER;

2. On the standby enable cancel recovery
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;

3. Open the standby database read-only
ALTER DATABASE OPEN;

4. Run select statements on opened standby database.

5. Put database back in mount state (this is optional, but is best practice and if not done can cause problems with switchover in the future)
STARTUP MOUNT FORCE;

6. Restart recovery on standby database.
# If the database has standby redo logs :-
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;
# If the database does not have standby redo logs :-
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION;

7. on the primary database re-enable log shipping that was deferred in step 1
ALTER SYSTEM SET LOG_ARCHIVE_DEST_STATE_2=ENABLE;

8. switch logs on the primary database (this is optional but will force the standby FAL server to get missing archivelogs ASAP)
alter system switch logfile;

Writing messages to the Oracle Alert Log

# If you want to test your alert log monitoring, or include application alerts in the database alert log, use dbms_system.ksdwrt. Its first parameter selects the destination:

1 - Write to trace file.
2 - Write to alert log.
3 - Write to both.

exec dbms_system.ksdwrt(2, 'ORA-20000 test error message');

# or you can use utl_file; a cleaned-up sketch of the same idea (point the directory at your background_dump_dest):

select value from v$parameter where name = 'background_dump_dest';

create or replace directory alert_dir as '/u01/app/oracle/admin/mysid/bdump';

declare
f utl_file.file_type;
begin
-- open the alert log in append mode and write the message
f := utl_file.fopen('ALERT_DIR', 'alert_mysid.log', 'A');
utl_file.put_line(f, 'invalid_application_error');
utl_file.fclose(f);
end;
/

Selecting from the Oracle Alert Log

create directory BDUMP as '/u01/app/oracle/admin/mysid/bdump';

create table
alert_log ( msg varchar2(80) )
organization external (
type oracle_loader
default directory BDUMP
access parameters (
records delimited by newline
)
location('alert_mysid.log')
)
reject limit 1000;

select msg from alert_log where msg like '%ORA-00600%';
ORA-00600: internal error code, arguments: [17034], [2940981512], [0], [], [], [ ], [], []
ORA-00600: internal error code, arguments: [18095], [0xC0000000210D8BF8], [], [], [], [], []
ORA-00600: internal error code, arguments: [4400], [48], [], [], []

Oracle recommended DDL audit

Audit alter any table by access;
Audit create any table by access;
Audit drop any table by access;
Audit Create any procedure by access;
Audit Drop any procedure by access;
Audit Alter any procedure by access;
Audit Grant any privilege by access;
Audit grant any object privilege by access;
Audit grant any role by access;
Audit audit system by access;
Audit create external job by access;
Audit create any job by access;
Audit create any library by access;
Audit create public database link by access;
Audit exempt access policy by access;
Audit alter user by access;
Audit create user by access;
Audit role by access;
Audit create session by access;
Audit drop user by access;
Audit alter database by access;
Audit alter system by access;
Audit alter profile by access;
Audit drop profile by access;

delete files over 1 day old

find /tmp -mtime +1 -name '*.log' -exec rm {} \;

Finding inactive users in Oracle

# enable session audit

sqlplus '/ as sysdba'
alter system set audit_trail=db scope=spfile;
shutdown immediate;
startup
audit session;

# after a few months of normal database operation find the users who have not logged in.

select username, created from dba_users where account_status = 'OPEN' and created < sysdate -90 and not exists
(select distinct userid from aud$ where userid = username and LOGOFF$TIME > sysdate -90)
order by username;

HTML Output from sqlplus

# Use sqlplus to produce nicely formatted output

set markup html on
spool c:\system_users.html
select username, default_tablespace from dba_users where default_tablespace = 'SYSTEM';
spool off;
set markup html off

Index and Table Monitoring

create table test (id integer primary key, text varchar2(10));
create index text_idx on test(text);

ALTER INDEX TEXT_IDX MONITORING USAGE;
ALTER INDEX SYS_C005203 MONITORING USAGE;

select index_name, used from v$object_usage;

SQL> select index_name, used from v$object_usage;

INDEX_NAME USE
------------------------------ ---
TEXT_IDX NO
SYS_C005203 NO

insert into test values (1,'test1');

SQL> select index_name, used from v$object_usage;

INDEX_NAME USE
------------------------------ ---
TEXT_IDX NO
SYS_C005203 NO

SQL> select * from test where id = 1;

ID TEXT
---------- ----------
1 test1

SQL> select index_name, used from v$object_usage;

INDEX_NAME USE
------------------------------ ---
TEXT_IDX NO
SYS_C005203 YES

# Table monitoring

alter table PROGRAM_AUDIT_BAK monitoring;

select table_name, inserts, updates, deletes from user_tab_modifications where table_name = 'PROGRAM_AUDIT_BAK';

# flush the monitoring data to user_tab_modifications immediately
exec dbms_stats.flush_database_monitoring_info;

# or wait - with statistics_level=typical the data is flushed automatically on a periodic basis

Shrink a table

select sum(bytes) from dba_segments where segment_name = 'ETPPRICE';
alter table etsdyn.etpprice enable row movement;
ALTER TABLE etsdyn.etpprice SHRINK SPACE CASCADE;
alter table etsdyn.etpprice disable row movement;
select sum(bytes) from dba_segments where segment_name = 'ETPPRICE';
alter index etsdyn.ETPPRICE_PK rebuild online;

Read / Write test on Dataguard Standby using Flashback

# On the primary database defer log shipping
ALTER SYSTEM ARCHIVE LOG CURRENT;
ALTER SYSTEM SET LOG_ARCHIVE_DEST_STATE_2=DEFER;
SELECT TO_CHAR(CURRENT_SCN) FROM V$DATABASE;

# on the standby enable flashback and disable recovery
ALTER SYSTEM SET DB_RECOVERY_FILE_DEST_SIZE=20G;
ALTER SYSTEM SET DB_RECOVERY_FILE_DEST='/oraarch1';
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
create restore point before_start guarantee flashback database;
ALTER DATABASE ACTIVATE STANDBY DATABASE;
ALTER DATABASE OPEN;

# Run read/write test on opened standby database.

# flashback and switch back to physical standby
STARTUP MOUNT FORCE;
flashback database to restore point before_start;
drop restore point before_start;
ALTER DATABASE CONVERT TO PHYSICAL STANDBY;
STARTUP MOUNT FORCE;
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;

# on the primary database enable log shipping
ALTER SYSTEM SET LOG_ARCHIVE_DEST_STATE_2=ENABLE;

Oracle External Password Store

sqlplus xxx/yyy@qd01bie1

#sqlnet.ora

WALLET_LOCATION =
(SOURCE =
(METHOD = FILE)
(METHOD_DATA =
(DIRECTORY = /opt/oracle/product/10.2.0/db_1/network/admin)
)
)

SQLNET.WALLET_OVERRIDE = TRUE
SSL_CLIENT_AUTHENTICATION = FALSE
SSL_VERSION = 0

# create the wallet

mkstore -wrl "/opt/oracle/product/10.2.0/db_1/network/admin" -create
Enter password:
Enter password again:

# add the login entry
mkstore -wrl "/opt/oracle/product/10.2.0/db_1/network/admin" -createCredential qd01bie1 xxx yyy
Enter password:
Create credential oracle.security.client.connect_string2

# list the entry

mkstore -wrl "C:\app\rgeier\product\11.1.0\db_1\network\admin" -listCredential

# test the login

[prod-oranode-50:qd01bie1]$ sqlplus /@qd01bie1

SQL*Plus: Release 10.2.0.3.0 - Production on Wed Jul 30 12:15:47 2008

Copyright (c) 1982, 2006, Oracle. All Rights Reserved.


Connected to:
Oracle Database 10g Enterprise Edition Release 10.2.0.3.0 - 64bit Production
With the Partitioning, OLAP and Data Mining options

SQL>


http://www.oracle-base.com/articles/10g/SecureExternalPasswordStore_10gR2.php

Encrypted Data Pump

# encrypt datapump export files
expdp username/password ENCRYPTION_PASSWORD=password tables=orders
impdp username/password ENCRYPTION_PASSWORD=password tables=orders

Transparent Data Encryption

# create wallet directory
mkdir /opt/oracle/product/10.2.0/db_1/encryption_wallet

# sqlnet.ora
ENCRYPTION_WALLET_LOCATION=(SOURCE=(METHOD=FILE)(METHOD_DATA=(DIRECTORY=/opt/oracle/product/10.2.0/db_1/encryption_wallet/)))

sqlplus / AS SYSDBA

#create the table and insert data
CREATE TABLE datafix.tde_test (
id NUMBER(10),
data VARCHAR2(50)
);

INSERT INTO datafix.tde_test (id, data) VALUES (1, 'secret');
COMMIT;

select * from datafix.tde_test;

# create the encryption key and password
ALTER SYSTEM SET ENCRYPTION KEY AUTHENTICATED BY "password";

# encrypt the table field
alter table datafix.tde_test modify (data encrypt);

select * from datafix.tde_test;

# close the wallet

ALTER SYSTEM SET WALLET CLOSE;

select * from datafix.tde_test;

# open the wallet

ALTER SYSTEM SET WALLET OPEN IDENTIFIED BY "password";

select * from datafix.tde_test;

# check what is encrypted

SQL> SELECT * FROM dba_encrypted_columns;

OWNER TABLE_NAME
------------------------------ ------------------------------
COLUMN_NAME ENCRYPTION_ALG SAL
------------------------------ ----------------------------- ---
DATAFIX TDE_TEST
DATA AES 192 bits key YES

Manual setup for Physical Dataguard

# Create primary database, backup with rman

connect target /
RUN {
ALLOCATE CHANNEL disk1 DEVICE TYPE DISK FORMAT '/backup/rman_%d_%T_%s_%p';
BACKUP CURRENT CONTROLFILE FOR STANDBY;
BACKUP as compressed backupset database plus archivelog;
}

# Create standby database using rman duplicate database
# Standby should have the same db_name, but different db_unique_name
# If Standby is on same ORACLE_HOME as Primary, set lock_name_space.
# Make sure db_create_file_dest is set

connect target sys/password@a
connect auxiliary /
run{
allocate auxiliary channel aux1 device type disk;
duplicate target database for standby nofilenamecheck dorecover;
}

# setup tnsnames.ora and password files

# on Standby
ALTER DATABASE ADD STANDBY LOGFILE SIZE 52428800;
ALTER DATABASE ADD STANDBY LOGFILE SIZE 52428800;
ALTER DATABASE ADD STANDBY LOGFILE SIZE 52428800;
ALTER DATABASE ADD STANDBY LOGFILE SIZE 52428800;
alter system set log_archive_dest_state_1=defer;
alter system set log_archive_dest_1='SERVICE=a LGWR ASYNC REOPEN=60 MAX_FAILURE=15 VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) db_unique_name=a';
alter system set log_archive_dest_state_1=enable;
alter system set fal_server='a';
alter system set fal_client='b';
alter system set log_archive_config='dg_config=(a,b)';
alter system set standby_file_management=auto;

# on Primary
ALTER DATABASE ADD STANDBY LOGFILE SIZE 52428800;
ALTER DATABASE ADD STANDBY LOGFILE SIZE 52428800;
ALTER DATABASE ADD STANDBY LOGFILE SIZE 52428800;
ALTER DATABASE ADD STANDBY LOGFILE SIZE 52428800;
alter system set log_archive_dest_state_1=defer;
alter system set log_archive_dest_1='SERVICE=b LGWR ASYNC REOPEN=60 MAX_FAILURE=15 VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) db_unique_name=b';
alter system set log_archive_dest_state_1=enable;
alter system switch logfile;
alter system set fal_server='b';
alter system set fal_client='a';
alter system set log_archive_config='dg_config=(a,b)';
alter system set standby_file_management=auto;

# on Standby
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;

Log Miner

alter database add supplemental log data;

select supplemental_log_data_min from v$database;

SQL> EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => '/oracle/TST/oraarch/TST_1_48_6b8c0516_663174038.arc', OPTIONS => DBMS_LOGMNR.NEW);
SQL> EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.DICT_FROM_ONLINE_CATALOG);
SQL> select SCN, OPERATION, SQL_REDO, SQL_UNDO from V$LOGMNR_CONTENTS;

begin
dbms_logmnr.start_logmnr(
startTime => to_date('2007-04-16 00:00:00','yyyy-mm-dd hh24:mi:ss'),
endTime => to_date('2007-04-20 00:00:00','yyyy-mm-dd hh24:mi:ss'),
options => dbms_logmnr.DICT_FROM_ONLINE_CATALOG + dbms_logmnr.CONTINUOUS_MINE);
end;
/
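
When finished, close the LogMiner session to release its resources:

EXECUTE DBMS_LOGMNR.END_LOGMNR;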

Switch from Physical Dataguard to Logical Dataguard

# pre-checks

SELECT DISTINCT OWNER,TABLE_NAME FROM DBA_LOGSTDBY_UNSUPPORTED ORDER BY OWNER,TABLE_NAME;
SELECT OWNER, TABLE_NAME,BAD_COLUMN FROM DBA_LOGSTDBY_NOT_UNIQUE WHERE TABLE_NAME NOT IN (SELECT TABLE_NAME FROM DBA_LOGSTDBY_UNSUPPORTED);
SELECT SUPPLEMENTAL_LOG_DATA_PK, SUPPLEMENTAL_LOG_DATA_UI FROM V$DATABASE;

# on Primary A
alter system set LOG_ARCHIVE_DEST_1='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ONLINE_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=b';
alter system set LOG_ARCHIVE_DEST_2='SERVICE=a LGWR ASYNC REOPEN=60 MAX_FAILURE=15 VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=a';
alter system set LOG_ARCHIVE_DEST_3='LOCATION=/bie_oraexp/B1/standby VALID_FOR=(STANDBY_LOGFILES,STANDBY_ROLE) DB_UNIQUE_NAME=b';

# on standby B
alter system set LOG_ARCHIVE_DEST_1='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ONLINE_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=a';
alter system set LOG_ARCHIVE_DEST_2='SERVICE=b LGWR ASYNC REOPEN=60 MAX_FAILURE=15 VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=b';
alter system set LOG_ARCHIVE_DEST_3='LOCATION=/bie_oraexp/A1/standby VALID_FOR=(STANDBY_LOGFILES,STANDBY_ROLE) DB_UNIQUE_NAME=a';

# Cancel recovery on physical standby
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;

# on primary
EXECUTE DBMS_LOGSTDBY.BUILD;

# on standby database recover and set the new name.
ALTER DATABASE RECOVER TO LOGICAL STANDBY b;

# create the password file for the new database name.
orapwd file=$ORACLE_HOME/dbs/orapw$ORACLE_SID password=mypassword entries=5

# open the new database
STARTUP MOUNT force;
ALTER DATABASE OPEN RESETLOGS;

# start the sql apply process on the logical standby
ALTER DATABASE START LOGICAL STANDBY APPLY IMMEDIATE;
--ALTER DATABASE STOP LOGICAL STANDBY APPLY;

# To switchover
ALTER DATABASE COMMIT TO SWITCHOVER TO LOGICAL STANDBY;

Streams with Downstream Log Mining

# first create standby redo logs on the destination and setup dataguard archival to that destination.
# set priveleges etc. On source and target

sqlplus bobj_login/password
ALTER TABLE bobj_login.orders ADD SUPPLEMENTAL LOG DATA (PRIMARY KEY, UNIQUE) COLUMNS;
grant all on bobj_login.orders to strmadmin;

# on target create queue
sqlplus strmadmin/password

begin
DBMS_STREAMS_ADM.SET_UP_QUEUE(queue_table => 'strmadmin.orders_queue_t',
queue_name => 'strmadmin.orders_queue',
queue_user => 'strmadmin');
end;
/

# create database links from source to target, target to source.

# create the capture process and rules on target
connect strmadmin/password

BEGIN
DBMS_CAPTURE_ADM.CREATE_CAPTURE(
queue_name => 'strmadmin.orders_queue',
capture_name => 'orders_capture',
rule_set_name => NULL,
start_scn => NULL,
source_database => 'P01CFD',
use_database_link => TRUE,
first_scn => NULL,
logfile_assignment => 'implicit');
END;
/

BEGIN
DBMS_CAPTURE_ADM.SET_PARAMETER(
capture_name => 'orders_capture',
parameter => 'downstream_real_time_mine',
value => 'Y');
END;
/

BEGIN
DBMS_STREAMS_ADM.ADD_TABLE_RULES(
table_name => 'BOBJ_LOGIN.orders',
streams_type => 'capture',
streams_name => 'orders_capture',
queue_name => 'strmadmin.orders_queue',
include_dml => TRUE,
include_ddl => TRUE,
include_tagged_lcr => FALSE,
source_database => 'P01CFD',
inclusion_rule => TRUE);
END;
/

# start capture on target

exec DBMS_CAPTURE_ADM.START_CAPTURE('orders_capture');

#exec dbms_capture_adm.stop_capture('orders_capture', TRUE);

# create apply process and rules

BEGIN
DBMS_STREAMS_ADM.ADD_TABLE_RULES(
table_name => 'BOBJ_LOGIN.orders',
streams_type => 'apply',
streams_name => 'orders_apply',
queue_name => 'strmadmin.orders_queue',
include_dml => true,
include_ddl => true,
source_database => 'P01CFD',
inclusion_rule => true);
END;
/

BEGIN
DBMS_APPLY_ADM.SET_PARAMETER(
apply_name => 'orders_apply',
parameter => 'disable_on_error',
value => 'n');
END;
/

# create schema transformation. (check in OEM to get the apply DML rule name - or select from dba_rules)
BEGIN
DBMS_STREAMS_ADM.RENAME_SCHEMA(
rule_name => 'ORDERS57',
from_schema_name => 'BOBJ_LOGIN',
to_schema_name => 'BI_DATA',
step_number => 0);
END;
/

# get SCN on source

SET NUMF 999999999999
select current_scn from v$database;

# instantiate the table on target

BEGIN
DBMS_APPLY_ADM.SET_TABLE_INSTANTIATION_SCN(source_object_name=> 'BOBJ_LOGIN.orders',
source_database_name => 'P01CFD' ,
instantiation_scn => 26427653914);
end;
/

# start apply process on target
BEGIN
DBMS_APPLY_ADM.START_APPLY(
apply_name => 'orders_apply');
END;
/
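
# verify the apply process is ENABLED, and watch for apply errors
select apply_name, status from dba_apply;
select apply_name, local_transaction_id, error_message from dba_apply_error;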

# enter a transaction on the source

insert into bobj_login.orders values (1);
commit;
alter system switch logfile;
alter system switch logfile;


# select on target (the schema rename transformation means the rows arrive in BI_DATA)
select * from bi_data.orders;

Oracle Startup and Role Change Triggers

# Triggers to change service name for Dataguard Standby databases.

# Create the services

# on database a
alter system set service_names = 'a';
# on database b
alter system set service_names = 'b';

exec DBMS_SERVICE.CREATE_SERVICE('PROD','PROD');
exec DBMS_SERVICE.CREATE_SERVICE('STANDBY','STANDBY');

# Role Change Trigger

CREATE OR REPLACE TRIGGER service_name_trg
AFTER DB_ROLE_CHANGE ON DATABASE
DECLARE
role VARCHAR2(30);
BEGIN
SELECT DATABASE_ROLE INTO role FROM V$DATABASE;
IF role = 'PRIMARY' THEN
DBMS_SERVICE.START_SERVICE('PROD');
ELSE
DBMS_SERVICE.START_SERVICE('STANDBY');
END IF;
END;
/

# Startup Trigger - will only fire after the database is open.
CREATE OR REPLACE TRIGGER startup_trg
AFTER STARTUP ON DATABASE
DECLARE
role VARCHAR2(30);
BEGIN
SELECT DATABASE_ROLE INTO role FROM V$DATABASE;
IF role = 'PRIMARY' THEN
DBMS_SERVICE.START_SERVICE('PROD');
ELSE
DBMS_SERVICE.START_SERVICE('STANDBY');
END IF;
END;
/

# Diagnostics scripts for service status

SELECT name, network_name FROM dba_services;
SELECT service_id, name, network_name FROM gv$active_services;

# Assorted scripts
exec DBMS_SERVICE.STOP_SERVICE('PROD');
exec DBMS_SERVICE.STOP_SERVICE('STANDBY');
exec DBMS_SERVICE.DELETE_SERVICE('PROD');
exec DBMS_SERVICE.START_SERVICE('PROD');

Manual Dataguard Switchover

# primary
# first shut down the second RAC node
select database_role from v$database;
SELECT SWITCHOVER_STATUS FROM V$DATABASE;
ALTER DATABASE COMMIT TO SWITCHOVER TO PHYSICAL STANDBY WITH SESSION SHUTDOWN;

# standby
select database_role from v$database;
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
SELECT SWITCHOVER_STATUS FROM V$DATABASE;
ALTER DATABASE COMMIT TO SWITCHOVER TO PRIMARY WAIT WITH SESSION SHUTDOWN;
shutdown immediate;
startup;

# old primary
shutdown immediate;
startup;
select database_role from v$database;
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;
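
# a quick check on the new standby that redo is being received and applied
select process, status, thread#, sequence# from v$managed_standby;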

Dataguard Diagnostics Select Statements

1. run on the primary to detect failures:

select destination, status, fail_date, valid_now
from v$archive_dest
where dest_name in ('LOG_ARCHIVE_DEST_1','LOG_ARCHIVE_DEST_2','LOG_ARCHIVE_DEST_3','LOG_ARCHIVE_DEST_4')
and (status <> 'VALID' or VALID_NOW <> 'YES');

2. run on the standby to get the exact position of the roll-forward:

select thread#, to_char(snapshot_time,'dd-mon-yyyy:hh24:mi') primary,
to_char(newest_time,'dd-mon-yyyy:hh24:mi') received,
to_char(applied_time,'dd-mon-yyyy:hh24:mi') applied
from V$STANDBY_APPLY_SNAPSHOT;
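
3. run on the standby to check for an archive gap (v$archive_gap returns rows only while a gap exists):

select thread#, low_sequence#, high_sequence# from v$archive_gap;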

Setup Dataguard Broker

# on both databases
sqlplus '/ as sysdba'
alter system set dg_broker_start=true;

dgmgrl sys/password

create configuration 'A_B' as primary database is 'a' connect identifier is a;
add database 'b' as connect identifier is b maintained as physical;

edit database 'a' set property LogXptMode='ASYNC';
edit database 'a' set property MaxFailure=15;
edit database 'a' set property ReopenSecs=60;
edit database 'a' set property StandbyFileManagement='AUTO';

edit database 'b' set property LogXptMode='ASYNC';
edit database 'b' set property MaxFailure=15;
edit database 'b' set property ReopenSecs=60;
edit database 'b' set property StandbyFileManagement='AUTO';

# check configuration
show configuration;
show configuration verbose;
show database verbose 'a';
show database verbose 'b';

# disable and enable configuration
disable configuration;
enable configuration;

# disable and enable a database
disable database 'b';
enable database 'b';

# switchover
switchover to 'b';
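
# after the switchover completes, re-check the configuration
show configuration verbose;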

# check archival status
select destination, status, fail_date, valid_now
from v$archive_dest
where dest_name in ('LOG_ARCHIVE_DEST_1')
and (status <> 'VALID' or valid_now <> 'YES');

Split a Partition

# split a partition
ALTER TABLE "BI_MIFID_STAGING"."SBPRICE" SPLIT PARTITION "SBPRICE_DE010908" AT (TIMESTAMP'2008-08-01 00:00:00')
INTO (PARTITION "SBPRICE_DE010808", PARTITION "SBPRICE_DE010908");

# check for unusable indexes

select count(*) from dba_indexes where status = 'UNUSABLE';
select count(*) from dba_ind_partitions where status = 'UNUSABLE';
select count(*) from dba_ind_subpartitions where status = 'UNUSABLE';
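
# if the split left index partitions unusable, rebuild them (the index name below is a made-up example);
# adding UPDATE INDEXES to the SPLIT PARTITION statement avoids this in the first place
ALTER INDEX "BI_MIFID_STAGING"."SBPRICE_IDX" REBUILD PARTITION "SBPRICE_DE010808";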

Simple Oracle Streams Example

# 1. Create the strmadmin user on both databases.

CREATE TABLESPACE streams;
CREATE USER strmadmin IDENTIFIED BY strmadmin DEFAULT TABLESPACE streams TEMPORARY TABLESPACE temp QUOTA UNLIMITED ON streams;
GRANT CONNECT, RESOURCE, DBA TO strmadmin;
GRANT SELECT_CATALOG_ROLE TO strmadmin;
GRANT SELECT ANY DICTIONARY TO strmadmin;

# 2. Create the target table dwh.stream_test in both databases.

CREATE TABLESPACE dwh;
CREATE USER dwh IDENTIFIED BY dwh DEFAULT TABLESPACE dwh TEMPORARY TABLESPACE temp QUOTA UNLIMITED ON dwh;
GRANT create session, create table to dwh;
connect dwh/dwh
create table stream_test (test_id integer primary key, test varchar2(20));
ALTER TABLE stream_test ADD SUPPLEMENTAL LOG DATA (PRIMARY KEY, UNIQUE) COLUMNS;
grant all on stream_test to strmadmin;

# 3. Grant the Streams administrator privilege on both databases

connect strmadmin/strmadmin@stream1
BEGIN
DBMS_STREAMS_AUTH.GRANT_ADMIN_PRIVILEGE(
grantee => 'strmadmin',
grant_privileges => true);
END;
/
connect strmadmin/strmadmin@stream2
BEGIN
DBMS_STREAMS_AUTH.GRANT_ADMIN_PRIVILEGE(
grantee => 'strmadmin',
grant_privileges => true);
END;
/

# 4. create the queues and database links (strmadmin.streams_queue must exist on both databases for the apply side to work).

CONNECT strmadmin/strmadmin@stream1
EXEC DBMS_STREAMS_ADM.SET_UP_QUEUE();
CREATE DATABASE LINK stream2 CONNECT TO strmadmin IDENTIFIED BY strmadmin USING 'stream2';
CONNECT strmadmin/strmadmin@stream2
EXEC DBMS_STREAMS_ADM.SET_UP_QUEUE();
CREATE DATABASE LINK stream1 CONNECT TO strmadmin IDENTIFIED BY strmadmin USING 'stream1';

# 5. create the capture process

CONNECT strmadmin/strmadmin@stream1
BEGIN
DBMS_STREAMS_ADM.ADD_TABLE_RULES(
table_name => 'dwh.stream_test',
streams_type => 'capture',
streams_name => 'capture_stream',
queue_name => 'strmadmin.streams_queue',
include_dml => true,
include_ddl => true,
inclusion_rule => true);
END;
/

# 6. create the propagation process

CONNECT strmadmin/strmadmin@stream1
BEGIN
DBMS_STREAMS_ADM.ADD_TABLE_PROPAGATION_RULES(
table_name => 'dwh.stream_test',
streams_name => 'STREAM1_to_STREAM2',
source_queue_name => 'strmadmin.streams_queue',
destination_queue_name => 'strmadmin.streams_queue@STREAM2',
include_dml => true,
include_ddl => true,
source_database => 'STREAM1',
inclusion_rule => true);
END;
/

# 7. set the instantiation SCN

CONNECT strmadmin/strmadmin@stream1
DECLARE
source_scn NUMBER;
BEGIN
source_scn := DBMS_FLASHBACK.GET_SYSTEM_CHANGE_NUMBER();
DBMS_APPLY_ADM.SET_TABLE_INSTANTIATION_SCN@STREAM2(
source_object_name => 'dwh.stream_test',
source_database_name => 'STREAM1',
instantiation_scn => source_scn);
END;
/
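
# a quick check on stream2 that the instantiation SCN was recorded
CONNECT strmadmin/strmadmin@stream2
select source_object_name, instantiation_scn from dba_apply_instantiated_objects;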

# 8. create the apply rule

CONNECT strmadmin/strmadmin@stream2
BEGIN
DBMS_STREAMS_ADM.ADD_TABLE_RULES(
table_name => 'dwh.stream_test',
streams_type => 'apply',
streams_name => 'apply_stream',
queue_name => 'strmadmin.streams_queue',
include_dml => true,
include_ddl => true,
source_database => 'STREAM1',
inclusion_rule => true);
END;
/

# 9. start the capture process

CONNECT strmadmin/strmadmin@stream1
BEGIN
DBMS_CAPTURE_ADM.START_CAPTURE(
capture_name => 'capture_stream');
END;
/

# 10. start the apply process

CONNECT strmadmin/strmadmin@stream2
BEGIN
DBMS_APPLY_ADM.SET_PARAMETER(
apply_name => 'apply_stream',
parameter => 'disable_on_error',
value => 'n');
END;
/
BEGIN
DBMS_APPLY_ADM.START_APPLY(
apply_name => 'apply_stream');
END;
/

# 11. create a test transaction

connect dwh/dwh@stream1
insert into stream_test values (1,'test1');
commit;

# 12. check if streams is working

connect dwh/dwh@stream2
select * from stream_test;
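
# 13. if the row does not appear, check the status of each component and look for apply errors

CONNECT strmadmin/strmadmin@stream1
select capture_name, status from dba_capture;
CONNECT strmadmin/strmadmin@stream2
select apply_name, status from dba_apply;
select apply_name, error_message from dba_apply_error;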