Tuesday, September 30, 2008

OEM job FAILED INIT

# OEM job fails with the following error :-
RemoteOperationException: Failed to establish input-streaming thread

# stop the agent
emctl stop agent

# increase the ThreadPoolModel
# back up emd.properties first with a datestamped suffix (original note left
# the suffix off, which would have created a file literally named "emd.properties.")
cp $ORACLE_HOME/sysman/config/emd.properties $ORACLE_HOME/sysman/config/emd.properties.20080930
vi $ORACLE_HOME/sysman/config/emd.properties

ThreadPoolModel=MEDIUM

# Start the agent
emctl start agent

Redirect an OEM agent to another console

# stop the agent
emctl stop agent

# Change REPOSITORY_URL and emdWalletSrcUrl in emd.properties

# single instance

cp $ORACLE_HOME/sysman/config/emd.properties $ORACLE_HOME/sysman/config/emd.properties.20080930

vi $ORACLE_HOME/sysman/config/emd.properties
:%s/prod-oemnode-10/qa-oemnode-10/g

rm -r $ORACLE_HOME/sysman/emd/state/*
rm -r $ORACLE_HOME/sysman/emd/collection/*
rm -r $ORACLE_HOME/sysman/emd/upload/*
rm $ORACLE_HOME/sysman/emd/lastupld.xml
rm $ORACLE_HOME/sysman/emd/agntstmp.txt
rm $ORACLE_HOME/sysman/emd/blackouts.xml
rm $ORACLE_HOME/sysman/emd/protocol.ini

# RAC

cp $ORACLE_HOME/*/sysman/config/emd.properties $ORACLE_HOME/*/sysman/config/emd.properties.20080930

vi $ORACLE_HOME/*/sysman/config/emd.properties
:%s/prod-oemnode-10/qa-oemnode-10/g

# Delete old config files

rm -r $ORACLE_HOME/*/sysman/emd/state/*
rm -r $ORACLE_HOME/*/sysman/emd/collection/*
rm -r $ORACLE_HOME/*/sysman/emd/upload/*
rm $ORACLE_HOME/*/sysman/emd/lastupld.xml
rm $ORACLE_HOME/*/sysman/emd/agntstmp.txt
rm $ORACLE_HOME/*/sysman/emd/blackouts.xml
rm $ORACLE_HOME/*/sysman/emd/protocol.ini

# start the agent
emctl start agent

# clear the agent
emctl clearstate agent

# register with the console (prompts for password)
emctl secure agent

# upload the configuration to the console
emctl upload

Thursday, September 25, 2008

Delete all files in a directory

[prod-oranode-50:pd01bie1]$ rm *
-bash: /usr/bin/rm: The parameter or environment lists are too long.

# iterate with a glob instead of parsing ls output; quote the name so files
# with spaces survive, and use -- so names starting with "-" are not taken as options
for FILE in *; do rm -- "$FILE"; done

Find all files with a specified string

find . -exec grep "cmcmarkets" '{}' \; -print

Oracle between is inclusive ...

SQL> select * from test where id between 1 and 3;

ID
----------
1
2
3

Tuesday, September 23, 2008

ora_rowscn does not seem to work ?

SQL> select scn_to_timestamp(max(ORA_ROWSCN)) from TABLE1;

SCN_TO_TIMESTAMP(MAX(ORA_ROWSCN))
---------------------------------------------------------------------------
22-SEP-08 01.13.30.000000000 PM

SQL> insert into table1 values(8);

1 row created.

SQL> select scn_to_timestamp(max(ORA_ROWSCN)) from TABLE1;

SCN_TO_TIMESTAMP(MAX(ORA_ROWSCN))
---------------------------------------------------------------------------
22-SEP-08 01.13.30.000000000 PM

SQL> alter system switch logfile;

System altered.

SQL> select scn_to_timestamp(max(ORA_ROWSCN)) from TABLE1;

SCN_TO_TIMESTAMP(MAX(ORA_ROWSCN))
---------------------------------------------------------------------------
22-SEP-08 01.13.30.000000000 PM

SQL> select sysdate from dual;

SYSDATE
--------------------
23 SEP 2008 17:05:07

Disable streams processes

# stop apply

-- Stop every Streams apply process (loop needs END LOOP and the block
-- needs a / terminator before SQL*Plus will run it)
BEGIN
  FOR cur_rec IN (SELECT apply_name FROM dba_apply) LOOP
    DBMS_APPLY_ADM.STOP_APPLY(
      apply_name => cur_rec.apply_name);
  END LOOP;
END;
/

# stop capture

-- Stop every Streams capture process (original was missing END LOOP; and /)
BEGIN
  FOR cur_rec IN (SELECT capture_name FROM dba_capture) LOOP
    DBMS_CAPTURE_ADM.STOP_CAPTURE(
      capture_name => cur_rec.capture_name);
  END LOOP;
END;
/

# stop propagate
-- disable the schedule (substitute your own queue/destination);
-- the trailing / is required or SQL*Plus just buffers the block
BEGIN
  DBMS_AQADM.DISABLE_PROPAGATION_SCHEDULE(
    queue_name  => 'strmadm.queue_name',
    destination => 'dest');
END;
/

-- find the propagation name to pass to stop_propagation
SELECT propagation_name
FROM dba_propagation;

exec dbms_propagation_adm.stop_propagation('XXX');

Monday, September 22, 2008

Logical Standby Management

# Override Guard protection
ALTER SESSION DISABLE GUARD


# ALTER DATABASE GUARD
ALL - everything
STANDBY - replicated from primary
NONE - nothing


# Change the commit order (faster performance, no read consistency)

exec dbms_logstdby.apply_set('PRESERVE_COMMIT_ORDER', 'FALSE');

# Skip DML on a table

ALTER DATABASE STOP LOGICAL STANDBY APPLY;
EXECUTE DBMS_LOGSTDBY.SKIP ('DML', 'ROBERT', 'TABLE1', null);
ALTER DATABASE START LOGICAL STANDBY APPLY IMMEDIATE;

EXECUTE DBMS_LOGSTDBY.UNSKIP ('DML', 'ROBERT', 'TABLE1');
# Skip a transaction

SELECT XIDUSN, XIDSLT, XIDSQN FROM DBA_LOGSTDBY_EVENTS WHERE EVENT_TIME = (SELECT MAX(EVENT_TIME) FROM DBA_LOGSTDBY_EVENTS);
exec dbms_logstdby.skip_transaction(XIDUSN => 1, XIDSLT => 10, XIDSQN => 1000);

# Instantiate a table using a database link

EXECUTE DBMS_LOGSTDBY.INSTANTIATE_TABLE (SCHEMA_NAME => 'ROBERT', TABLE_NAME => 'TABLE2', DBLINK => 'A_LINK');

# Tables for Diagnostics

SELECT FILE_NAME, SEQUENCE# AS SEQ#, FIRST_CHANGE# AS FCHANGE#, NEXT_CHANGE# AS NCHANGE#, TIMESTAMP, DICT_BEGIN AS BEG, DICT_END AS END,
THREAD# AS THR# FROM DBA_LOGSTDBY_LOG ORDER BY SEQUENCE#;

SELECT EVENT_TIME, STATUS, EVENT FROM DBA_LOGSTDBY_EVENTS ORDER BY EVENT_TIMESTAMP, COMMIT_SCN;

SELECT APPLIED_SCN, LATEST_SCN, MINING_SCN, RESTART_SCN FROM V$LOGSTDBY_PROGRESS;

SELECT SID, SERIAL#, LOGSTDBY_ID AS LID, SPID, TYPE, HIGH_SCN FROM V$LOGSTDBY_PROCESS;

SELECT TYPE, STATUS_CODE, STATUS FROM V$LOGSTDBY_PROCESS;

SELECT * FROM V$LOGSTDBY_STATE;

SELECT NAME, VALUE, TIME_COMPUTED FROM V$LOGSTDBY_STATS;

v$LOGSTDBY

# What Sql is being applied ?

SELECT SAS.SERVER_ID, SS.OWNER, SS.OBJECT_NAME, SS.STATISTIC_NAME, SS.VALUE
FROM V$SEGMENT_STATISTICS SS
, V$LOCK L
, V$STREAMS_APPLY_SERVER SAS
WHERE SAS.SERVER_ID = &SLAVE_ID
AND L.SID = SAS.SID
AND L.TYPE = 'TM'
AND SS.OBJ# = L.ID1;

Friday, September 19, 2008

Moving a datafile to another ASM group

# set tablespace read only
sqlplus
connect / as sysdba
alter tablespace bi_mifid_stg_lob read only;

# datafile offline
alter database datafile '+ASM_ORADATA52/p01bie/datafile/bi_mifid_stg_lob.267.663234691' offline;

# copy datafile
RMAN
connect target /
copy datafile '+ASM_ORADATA52/p01bie/datafile/bi_mifid_stg_lob.267.663234691' to '+ASM_ORADATA51';

# rename datafile
switch datafile '+ASM_ORADATA52/p01bie/datafile/bi_mifid_stg_lob.267.663234691' to copy;
# or
-- NOTE: ASM paths must start with '+'; the original note dropped it on the
-- target paths below. Replace the '.....' placeholder with the real
-- file.number.incarnation name reported by RMAN after the copy.
alter database rename datafile
'+ASM_ORADATA52/p01bie/datafile/bi_mifid_stg_lob.267.663234691'
to
'+ASM_ORADATA51/p01bie/datafile/bi_mifid_stg_lob.....';

# recover datafile
recover datafile '+ASM_ORADATA51/p01bie/datafile/bi_mifid_stg_lob.....';

# datafile online
sqlplus
connect / as sysdba
alter database datafile '+ASM_ORADATA51/p01bie/datafile/bi_mifid_stg_lob.....' online;

# tablespace read write
alter tablespace bi_mifid_stg_lob read write;

Thursday, September 18, 2008

Prepare devices for ASM on AIX

[dr-oranode-00:su01cfd1]$ ls -l /dev/hdisk*
brw------- 1 root system 18, 1 18 Aug 2007 /dev/hdisk14

mknod /dev/asm_disk501 c 18 1

chown oracle:dba /dev/asm*
chmod 660 /dev/asm*

dd if=/dev/zero of=/dev/asm_disk501 bs=8192 count=25000

Compressed tables and indexes

# create a compressed table
create table orderaudit_compress_pf0_32k compress pctfree 0 tablespace orderaudit_32k nologging as select * from bobj_login.orderaudit
where rownum < 1000000;

# create a compressed index
CREATE INDEX IDX_AUDIT_IDVER_CMP_32K ON orderaudit_nocompress_32k
(UNIQUEID, OBJECTVERSION)
COMPRESS 2
LOGGING
TABLESPACE orderaudit_32k
PARALLEL ( DEGREE 7 INSTANCES 1 );

DBMS_STATS

# find the latest partition
select partition_name from user_tab_partitions where table_name = 'ORDERS';

# create a stats backup table
exec dbms_stats.create_stat_table('BOBJ_LOGIN','STATS_BACKUP');

# export partition stats
exec dbms_stats.export_table_stats('BOBJ_LOGIN','ORDERS','ORDERS_WE210908','STATS_BACKUP',NULL,TRUE);

# collect new partition stats
-- Gather stats for one partition only (PARTITION granularity),
-- cascading to its indexes, 10% sample, 4-way parallel,
-- invalidating dependent cursors immediately.
BEGIN
  dbms_stats.gather_table_stats(
    ownname          => 'bobj_login',
    tabname          => 'ORDERS',
    partname         => 'ORDERS_WE210908',
    estimate_percent => 10,
    granularity      => 'PARTITION',
    cascade          => TRUE,
    degree           => 4,
    no_invalidate    => FALSE);
END;
/

# gather system stats

dbms_stats.create_stat_table(ownname => 'system', stattab => 'system_stats', tblspace => 'datafix_dat');

dbms_stats.gather_system_stats(gathering_mode => 'START',stattab => 'system_stats', statid => 'systats_25062007' ,statown => 'system')

dbms_stats.gather_system_stats(gathering_mode => 'STOP',stattab => 'system_stats', statid => 'systats_25062007' ,statown => 'system');

# export system stats

dbms_stats.export_system_stats(stattab => 'systats_apn_5_1_2006', statid => 'systats_before_new_cpus', statown => 'datafix');

# delete system stats

dbms_stats.delete_system_stats;

# import system stats

dbms_stats.import_system_stats(stattab => 'system_stats', statid => 'systats_25062007', statown => 'system');

# export schema stats
-- NOTE(review): the original note repeated the export_system_stats call from
-- the "# export system stats" section above; schema stats are exported with
-- export_schema_stats and take the schema owner. Adjust stattab/statid to taste.

exec dbms_stats.export_schema_stats(ownname => 'bobj_login', stattab => 'system_stats', statid => 'schstats_18092008', statown => 'system');

# gather schema stats

DBMS_STATS.GATHER_SCHEMA_STATS
(
OwnName => 'bobj_login',
Estimate_Percent => 2,
Block_sample => FALSE,
Method_Opt => 'FOR ALL COLUMNS SIZE 100',
Degree => 4,
Cascade => TRUE,
GRANULARITY => 'ALL'
);

Wednesday, September 17, 2008

ASM rebalance power.

To increase the power of the rebalancing operation on the fly you can do the following (this has been done for ASM_ORADATA52)

ALTER DISKGROUP ASM_ORADATA52 REBALANCE POWER 8 NOWAIT;

You can monitor the rebalancing by querying v$asm_operation via sqlplus (ASM environment)

select * from v$asm_operation;
GROUP_NUMBER OPERA STAT POWER ACTUAL SOFAR EST_WORK EST_RATE
------------ ----- ---- ---------- ---------- ---------- ---------- ----------
EST_MINUTES
-----------
2 REBAL RUN 8 8 1320 170597 393
430

Tuesday, September 16, 2008

ipcs ipcrm

[oracle@dev-statsdb-1 bdump]$ ipcs

------ Shared Memory Segments --------
key shmid owner perms bytes nattch status
0x0001ffb8 0 root 666 76 4
0x00025990 32769 root 666 8308 4
0x00027cb9 65538 root 666 132256 1
0x00027cba 98307 root 666 132256 1
0x00027cbb 131076 root 666 132256 1
0x8142536c 163845 oracle 640 3156213760 0

[oracle@dev-statsdb-1 bdump]$ ipcrm -m 163845

AWR snaps and reports

# take a snapshot

EXEC DBMS_WORKLOAD_REPOSITORY.create_snapshot;

# Run a report

@$ORACLE_HOME/rdbms/admin/awrrpt.sql

Thursday, September 11, 2008

If the db recovery file dest fills up.

************************************************************************
You have following choices to free up space from flash recovery area:
1. Consider changing RMAN RETENTION POLICY. If you are using Data Guard,
then consider changing RMAN ARCHIVELOG DELETION POLICY.
2. Back up files to tertiary device such as tape using RMAN
BACKUP RECOVERY AREA command.
3. Add disk space and increase db_recovery_file_dest_size parameter to
reflect the new space.
4. Delete unnecessary files using RMAN DELETE command. If an operating
system command was used to delete files, then use RMAN CROSSCHECK and
DELETE EXPIRED commands.
************************************************************************

rman
connect target /
crosscheck archivelog all;
delete expired archivelog all;

delete archivelog all completed before 'sysdate -10';

delete noprompt force archivelog all completed before "sysdate-3" backed up 2 times to device type 'SBT_TAPE';

ORA-01111: name for data file 22 is unknown - rename to correct file

# Physical Standby. Datafile added to primary causes recovery to fail on Standby

SQL> recover standby database;
ORA-00283: recovery session canceled due to errors
ORA-01111: name for data file 22 is unknown - rename to correct file
ORA-01110: data file 22: '/opt/oracle/product/10.2.0/db_1/dbs/UNNAMED00022'
ORA-01157: cannot identify/lock data file 22 - see DBWR trace file
ORA-01111: name for data file 22 is unknown - rename to correct file
ORA-01110: data file 22: '/opt/oracle/product/10.2.0/db_1/dbs/UNNAMED00022'

# Fix
alter database create datafile '/opt/oracle/product/10.2.0/db_1/dbs/UNNAMED00022' as '+ASM_ORADATA52/pl01dis1/datafile/disdwh_dat_nassm.dbf';

# To avoid a repeat
alter system set db_create_file_dest='+ASM_ORADATA52';
alter system set standby_file_management=auto;

Wednesday, September 10, 2008

Using cpio to extract from a file

cat 10g_wf_aix.cpio | cpio -idcmv

Rebuilding Indexes, Index Partitions, Index Subpartitions

# INDEX
select distinct 'alter index bi_mifid_staging.'||segment_name||' rebuild tablespace bi_mifid_stg_data online;'
from dba_segments
where owner = 'BI_MIFID_STAGING'
and segment_type = 'INDEX'
and tablespace_name = 'BI_MIFID_STG_IND';

# INDEX PARTITION
select distinct 'alter index bi_mifid_staging.'||segment_name||' rebuild partition '||partition_name||' tablespace bi_mifid_stg_data online;'
from dba_segments
where owner = 'BI_MIFID_STAGING'
and segment_type = 'INDEX PARTITION'
and tablespace_name = 'BI_MIFID_STG_IND';

# INDEX SUBPARTITIONS
select distinct 'alter index bi_mifid_staging.'||index_name||' rebuild subpartition '||subpartition_name||' tablespace bi_mifid_stg_data online;'
from dba_ind_subpartitions
where index_owner = 'BI_MIFID_STAGING'
and tablespace_name = 'BI_MIFID_STG_IND';

Tuesday, September 9, 2008

Check for Oracle block corruption

# To check for Oracle block corruption you can :-

# 1. use DBV
dbv file=/db1/system01.dbf blocksize=8192

#2. use rman
run {
allocate channel disk1 device type disk ;
allocate channel disk2 device type disk ;
backup validate check logical database;
}
select count(*) from v$database_block_corruption;


RMAN> blockrecover corruption list;

Saturday, September 6, 2008

Firewall trigger ?

Unfortunately this did not work because ora_client_ip_address is null :(

-- Logon trigger intended to block a source IP range.
-- (Original note had "REATE" — missing C — so it never compiled;
-- and as noted above, ora_client_ip_address was NULL in this environment,
-- which the NVL default deliberately lets through.)
CREATE OR REPLACE TRIGGER firewall_trig
AFTER LOGON
ON DATABASE
BEGIN
if NVL(ora_client_ip_address, 'N/A') like '111.111.111%' then
raise_application_error(-20000,'Your IP address is not allowed to login');
end if;
END firewall_trig;
/

Alter user trigger

-- DDL trigger: reject any ALTER against a USER object database-wide.
CREATE OR REPLACE TRIGGER alter_trigger
  BEFORE ALTER ON DATABASE
DECLARE
BEGIN
  IF ora_dict_obj_type = 'USER' THEN
    raise_application_error(-20010,'you are not allowed to alter users');
  END IF;
END;
/

dbms_application_info

dbms_application_info.set_module (module_name=>'test_package', action_name=>'starting batch');
dbms_application_info.set_action
(action_name => 'loading table x');
dbms_application_info.set_client_info ('inserting row ' || rowcount);

select module, action, client_info from v$session;

Also use
dbms_application_info.set_session_longops to set v$session_longops.
Can set sofar, total work and Oracle calculates time remaining.

UTL_MAIL example

DECLARE
  -- CRLF is carriage return THEN line feed: CHR(13)||CHR(10).
  -- (Original had CHR(10)||CHR(13), i.e. LF+CR, which is not a CRLF.)
  CRLF CHAR(2) := CHR(13) || CHR(10);

BEGIN
  UTL_MAIL.SEND(
    sender     => 'test@test.com'
   ,recipients => 'test@test.com'
   ,cc         => NULL
   ,bcc        => NULL
   ,subject    => 'Test Email'
   ,message    => 'Test line1' || CRLF || CRLF
               || 'Test line2'
   ,mime_type  => 'text/plain; charset=us-ascii'
   ,priority   => 1
  );
END;
/

RESUMABLE_TIMEOUT

Temp tablespace filling up and you can't work out who is doing it ?

set RESUMABLE_TIMEOUT. The session will hang when it runs out of space, and you can decide whether to kill the session, or add space. Note that while the temp tablespace is full other sessions will hang also, so think carefully whether you would prefer consumers of abnormally large amounts of space to terminate, or potentially hang other sessions.

Thursday, September 4, 2008

Archive to db_recovery_file_dest

alter system set db_recovery_file_dest_size=400G;
alter system set db_recovery_file_dest='+ASM_ORADATA42';
alter system set LOG_ARCHIVE_DEST_1='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=pd01dis1';

Create the Oracle Password file

orapwd file=$ORACLE_HOME/dbs/orapw$ORACLE_SID password=mypassword entries=5

Moving redo logs to new ASM disk groups

alter system set db_create_online_log_dest_1='+ASM_ORADATA43';
alter system set db_create_online_log_dest_2='+ASM_ORADATA43';

select group#, status from v$log;

ALTER DATABASE DROP LOGFILE GROUP 1;
ALTER DATABASE ADD LOGFILE GROUP 1 SIZE 600M;

ALTER DATABASE DROP LOGFILE GROUP 2;
ALTER DATABASE ADD LOGFILE GROUP 2 SIZE 600M;

ALTER DATABASE DROP LOGFILE GROUP 3;
ALTER DATABASE ADD LOGFILE GROUP 3 SIZE 600M;

SR Dataguard Diag for Standby

-- NAME: DG_phy_stby_diag.sql   
-- ------------------------------------------------------------------------
-- AUTHOR:
-- Michael Smith - Oracle Support Services - DataServer Group
-- Copyright 2002, Oracle Corporation
-- ------------------------------------------------------------------------
-- PURPOSE:
-- This script is to be used to assist in collection information to help
-- troubeshoot Data Guard issues.
-- ------------------------------------------------------------------------
-- DISCLAIMER:
-- This script is provided for educational purposes only. It is NOT
-- supported by Oracle World Wide Technical Support.
-- The script has been tested and appears to work as intended.
-- You should always run new scripts on a test instance initially.
-- ------------------------------------------------------------------------
-- Script output is as follows:

set echo off
set feedback off
column timecol new_value timestamp
column spool_extension new_value suffix
select to_char(sysdate,'Mondd_hhmi') timecol,
'.out' spool_extension from sys.dual;
column output new_value dbname
select value || '_' output
from v$parameter where name = 'db_name';
spool dgdiag_phystby_&&dbname&&timestamp&&suffix
set lines 200
set pagesize 35
set trim on
set trims on
alter session set nls_date_format = 'MON-DD-YYYY HH24:MI:SS';
set feedback on
select to_char(sysdate) time from dual;

set echo on

--
-- ARCHIVER can be (STOPPED | STARTED | FAILED) FAILED means that the archiver failed
-- to archive a -- log last time, but will try again within 5 minutes. LOG_SWITCH_WAIT
-- The ARCHIVE LOG/CLEAR LOG/CHECKPOINT event log switching is waiting for. Note that
-- if ALTER SYSTEM SWITCH LOGFILE is hung, but there is room in the current online
-- redo log, then value is NULL

column host_name format a20 tru
column version format a9 tru
select instance_name,host_name,version,archiver,log_switch_wait from v$instance;

-- The following select will give us the generic information about how this standby is
-- setup. The database_role should be standby as that is what this script is intended
-- to be ran on. If protection_level is different than protection_mode then for some
-- reason the mode listed in protection_mode experienced a need to downgrade. Once the
-- error condition has been corrected the protection_level should match the protection_mode
-- after the next log switch.

column ROLE format a7 tru
select name,database_role,log_mode,controlfile_type,protection_mode,protection_level
from v$database;

-- Force logging is not mandatory but is recommended. Supplemental logging should be enabled
-- on the standby if a logical standby is in the configuration. During normal
-- operations it is acceptable for SWITCHOVER_STATUS to be SESSIONS ACTIVE or NOT ALLOWED.

column force_logging format a13 tru
column remote_archive format a14 tru
column dataguard_broker format a16 tru
select force_logging,remote_archive,supplemental_log_data_pk,supplemental_log_data_ui,
switchover_status,dataguard_broker from v$database;

-- This query produces a list of all archive destinations and shows if they are enabled,
-- what process is servicing that destination, if the destination is local or remote,
-- and if remote what the current mount ID is. For a physical standby we should have at
-- least one remote destination that points the primary set but it should be deferred.

COLUMN destination FORMAT A35 WRAP
column process format a7
column archiver format a8
column ID format 99

select dest_id "ID",destination,status,target,
archiver,schedule,process,mountid
from v$archive_dest;

-- If the protection mode of the standby is set to anything higher than max performance
-- then we need to make sure the remote destination that points to the primary is set
-- with the correct options else we will have issues during switchover.

select dest_id,process,transmit_mode,async_blocks,
net_timeout,delay_mins,reopen_secs,register,binding
from v$archive_dest;

-- The following select will show any errors that occured the last time an attempt to
-- archive to the destination was attempted. If ERROR is blank and status is VALID then
-- the archive completed correctly.

column error format a55 tru
select dest_id,status,error from v$archive_dest;

-- Determine if any error conditions have been reached by querying thev$dataguard_status
-- view (view only available in 9.2.0 and above):

column message format a80
select message, timestamp
from v$dataguard_status
where severity in ('Error','Fatal')
order by timestamp;

-- The following query is ran to get the status of the SRL's on the standby. If the
-- primary is archiving with the LGWR process and SRL's are present (in the correct
-- number and size) then we should see a group# active.

select group#,sequence#,bytes,used,archived,status from v$standby_log;

-- The above SRL's should match in number and in size with the ORL's returned below:

select group#,thread#,sequence#,bytes,archived,status from v$log;

-- Query v$managed_standby to see the status of processes involved in the
-- configuration.

select process,status,client_process,sequence#,block#,active_agents,known_agents
from v$managed_standby;

-- Verify that the last sequence# received and the last sequence# applied to standby
-- database.

select max(al.sequence#) "Last Seq Recieved", max(lh.sequence#) "Last Seq Applied"
from v$archived_log al, v$log_history lh;

-- The V$ARCHIVE_GAP fixed view on a physical standby database only returns the next
-- gap that is currently blocking redo apply from continuing. After resolving the
-- identified gap and starting redo apply, query the V$ARCHIVE_GAP fixed view again
-- on the physical standby database to determine the next gap sequence, if there is
-- one.

select * from v$archive_gap;

-- Non-default init parameters.

set numwidth 5
column name format a30 tru
column value format a50 wra
select name, value
from v$parameter
where isdefault = 'FALSE';

spool off

###################################################################

-- NAME: dg_table_diag.sql (Run on PRIMARY and on LOGICAL STANDBY)
-- ------------------------------------------------------------------------
-- Copyright 2004, Oracle Corporation
-- LAST UPDATED: 2/20/04
--
-- Usage: @dg_table_diag schema table
-- ------------------------------------------------------------------------
-- PURPOSE:
-- This script is to be used to assist in collection information to help
-- troubeshoot Data Guard issues with an emphasis on Logical Standby.
-- ------------------------------------------------------------------------
-- DISCLAIMER:
-- This script is provided for educational purposes only. It is NOT
-- supported by Oracle World Wide Technical Support.
-- The script has been tested and appears to work as intended.
-- You should always run new scripts on a test instance initially.
-- ------------------------------------------------------------------------
-- Script output is as follows:

set echo off
set feedback off
column timecol new_value timestamp
column spool_extension new_value suffix
select to_char(sysdate,'Mondd_hhmi') timecol,
'.out' spool_extension from sys.dual;
column output new_value dbname
select value || '_' output
from v$parameter where name = 'db_name';
spool dg_table_diag_&&dbname&&timestamp&&suffix
set linesize 79
set pagesize 180
set trim on
set trims on
alter session set nls_date_format = 'MON-DD-YYYY HH24:MI:SS';
set numwidth 15
set long 90000
set feedback on

-- Update the version when you change this file, so we know what file was
-- used to produce what output.

set echo on
--
-- Diagnostic version: 040618
--
set echo off
select to_char(sysdate) time from dual;


-- Show the database name and role, then display details about the table.
select name,database_role,supplemental_log_data_pk pk,
supplemental_log_data_ui ui
from v$database;

-- Exit on error
Whenever sqlerror exit


-- Get the input
variable inowner varchar2(30);
variable intable varchar2(30);
variable objnum number;

exec :inowner := UPPER('&1'); :intable := UPPER('&2')

-- Continue on error
Whenever sqlerror continue

-- Show the table
select DBMS_METADATA.GET_DDL('TABLE', :intable, :inowner) create_table
from dual;

-- Show the indexes on the table.
select DBMS_METADATA.GET_DEPENDENT_DDL('INDEX', :intable, :inowner)
create_indexes
from dual;

-- Show the triggers on the table.
select DBMS_METADATA.GET_DEPENDENT_DDL('TRIGGER', :intable, :inowner)
create_triggers
from dual;

-- Exit on error
Whenever sqlerror exit

-- Get the object number from the log miner dictionary
begin
-- Resolve the table's object number from the LogMiner dictionary tables,
-- restricted to the current Logical Standby LogMiner session (LMNR_SID).
select slo.obj# into :objnum
from system.logmnr_obj$ slo, system.logmnr_user$ slu,
system.logmnr_uid$ sud, system.logstdby$parameters slp
where slo.owner# = slu.user#
and slo.logmnr_uid = sud.logmnr_uid
and slu.logmnr_uid = sud.logmnr_uid
-- session# is numeric in logmnr_uid$ but stored as text in logstdby$parameters
and to_char(sud.session#) = slp.value
and slp.name = 'LMNR_SID'
-- type# filter: 2 looks like TABLE; the meaning of 6 here is not shown in
-- this script -- TODO confirm against the obj$ type# codes for this release
and (slo.type# = 2 or slo.type# = 6)
and slu.name = :inowner
and slo.name = :intable;
end;
/

-- Show the various column numbers. (Needed if no dictionary)
set numwidth 8
select c.name, c.col#, c.segcol#, c.null$, c.obj#
from system.logmnr_col$ c,
system.logmnr_uid$ sud, system.logstdby$parameters slp
where c.obj# = :objnum
and c.logmnr_uid = sud.logmnr_uid
and to_char(sud.session#) = slp.value
and slp.name = 'LMNR_SID'
order by col#;

-- Show the statement we died on.
select commit_scn, current_scn, xidusn, xidslt, xidsqn, event, status
from dba_logstdby_events
where commit_scn = (select max(commit_scn) from dba_logstdby_events);


set echo on
--
-- Log Miner: Changes to table on primary.
-- Data gathered from logs registered on standby.
--
set echo off

-- This assumes a standby with logs in it. If you have no logs
-- This does no harm
declare
final_scn number;
mineopt number;
instver number;
dict number;

CURSOR logfiles IS SELECT file_name FROM dba_logstdby_log
WHERE first_change# <= final_scn;
begin
-- Get the instance base version#
select to_number(substr(version, 1, 2)) into instver from v$instance;

-- Compute the last scn to be mined.
select greatest(
(select applied_scn from dba_logstdby_progress),
(select max(commit_scn) from dba_logstdby_events))
into final_scn
from sys.dual;

-- If there is a dictionary use local catelog.
select count(*) into dict from dba_logstdby_log
where dict_begin = 'YES';

-- Register the necessary files
FOR log IN logfiles LOOP
dbms_logmnr.add_logfile(log.file_name);
END LOOP;

-- Start Log Miner (without a dictionary)
-- In 10.1 dbms_logmnr.NO_ROWID_IN_STMT = 2048
mineopt := dbms_logmnr.PRINT_PRETTY_SQL;
if instver > 9 then mineopt := mineopt + 2048; end if;
if dict > 0
then
mineopt := mineopt + dbms_logmnr.DICT_FROM_REDO_LOGS;
mineopt := mineopt + dbms_logmnr.DDL_DICT_TRACKING;
end if;
dbms_logmnr.start_logmnr(
endScn => final_scn,
options => mineopt);
end;
/

-- Continue on error
Whenever sqlerror continue

-- Verify we've added the right logs
set numwidth 6
select thread_id, thread_sqn, low_time, high_time, dictionary_begin
from v$logmnr_logs order by thread_id, thread_sqn;

-- Look at redo for a given table.
column scn format 999999999999999
select 'P' P, SCN, XIDUSN, XIDSLT, XIDSQN, ROLLBACK,
THREAD#, RBASQN, RBABLK, RBABYTE,
SQL_REDO
from v$logmnr_contents
where DATA_OBJ# = :objnum;

-- Exit on error
Whenever sqlerror exit

-- Finish by cleaning up this log miner session.
exec dbms_logmnr.end_logmnr;


set echo on
--
-- Log Miner: Changes to table on local system
-- Data gathered only if ARCHIVELOG mode is on
--
set echo off

-- Mine local logs, since the time we registered our first from primary.
-- The files may not be on disk, so we trap for that.
declare
CURSOR logfiles IS SELECT name FROM v$archived_log
WHERE dest_id = 1
AND completion_time > (select min(timestamp) from dba_logstdby_log)
ORDER BY first_time DESC;
instver NUMBER;
mineopt NUMBER;
begin
BEGIN
FOR log IN logfiles LOOP
dbms_logmnr.add_logfile(log.name);
END LOOP;

EXCEPTION
WHEN OTHERS THEN mineopt := 0;
END;
end;
/

declare
instver NUMBER;
mineopt NUMBER;
begin
-- Get the instance base version#
select to_number(substr(version, 1, 2)) into instver from v$instance;

-- Start Log Miner (without a dictionary)
-- In 10.1 dbms_logmnr.NO_ROWID_IN_STMT = 2048
mineopt := dbms_logmnr.PRINT_PRETTY_SQL +
dbms_logmnr.DICT_FROM_ONLINE_CATALOG;
if instver > 9 then mineopt := mineopt + 2048; end if;

dbms_logmnr.start_logmnr(options => mineopt);
end;
/


-- Continue on error
Whenever sqlerror continue

-- Verify we've added the right logs
set numwidth 6
select thread_id, thread_sqn, low_time, high_time, dictionary_begin
from v$logmnr_logs order by thread_id, thread_sqn;

-- Look at redo for a given table or standby tables.
column scn format 999999999999999
select 'S' S, SCN, XIDUSN, XIDSLT, XIDSQN, ROLLBACK,
THREAD#, RBASQN, RBABLK, RBABYTE,
SQL_REDO
from v$logmnr_contents
where seg_name = 'LOGSTDBY$PARAMETERS'
or seg_name = 'LOGSTDBY$HISTORY'
or seg_name = 'LOGSTDBY$SKIP'
or seg_name = 'LOGSTDBY$SKIP_TRANSACTION'
or (seg_owner = :inowner and seg_name = :intable);

-- Finish by cleaning up this log miner session.
exec dbms_logmnr.end_logmnr;

spool off

exit

Dataguard SR diagnostics script for PRIMARY

-- NAME: dg_prim_diag.sql  (Run on PRIMARY with a LOGICAL or PHYSICAL STANDBY)
-- ------------------------------------------------------------------------
-- Copyright 2002, Oracle Corporation
-- LAST UPDATED: 2/23/04
--
-- Usage: @dg_prim_diag
-- ------------------------------------------------------------------------
-- PURPOSE:
-- This script is to be used to assist in collection information to help
-- troubeshoot Data Guard issues with an emphasis on Logical Standby.
-- ------------------------------------------------------------------------
-- DISCLAIMER:
-- This script is provided for educational purposes only. It is NOT
-- supported by Oracle World Wide Technical Support.
-- The script has been tested and appears to work as intended.
-- You should always run new scripts on a test instance initially.
-- ------------------------------------------------------------------------
-- Script output is as follows:

set echo off
set feedback off
column timecol new_value timestamp
column spool_extension new_value suffix
select to_char(sysdate,'Mondd_hhmi') timecol,
'.out' spool_extension from sys.dual;
column output new_value dbname
select value || '_' output
from v$parameter where name = 'db_name';
spool dg_prim_diag_&&dbname&&timestamp&&suffix
set linesize 79
set pagesize 35
set trim on
set trims on
alter session set nls_date_format = 'MON-DD-YYYY HH24:MI:SS';
set feedback on
select to_char(sysdate) time from dual;

set echo on

-- In the following the database_role should be primary as that is what
-- this script is intended to be run on. If protection_level is different
-- than protection_mode then for some reason the mode listed in
-- protection_mode experienced a need to downgrade. Once the error
-- condition has been corrected the protection_level should match the
-- protection_mode after the next log switch.

column role format a7 tru
column name format a10 wrap

select name,database_role role,log_mode,
protection_mode,protection_level
from v$database;

-- ARCHIVER can be (STOPPED | STARTED | FAILED). FAILED means that the
-- archiver failed to archive a log last time, but will try again within 5
-- minutes. LOG_SWITCH_WAIT The ARCHIVE LOG/CLEAR LOG/CHECKPOINT event log
-- switching is waiting for. Note that if ALTER SYSTEM SWITCH LOGFILE is
-- hung, but there is room in the current online redo log, then value is
-- NULL

column host_name format a20 tru
column version format a9 tru

select instance_name,host_name,version,archiver,log_switch_wait
from v$instance;

-- The following query give us information about catpatch.
-- This way we can tell if the procedure doesn't match the image.

select version, modified, status from dba_registry
where comp_id = 'CATPROC';

-- Force logging is recommended, though not mandatory. Supplemental
-- logging must be enabled when the standby fed by this primary is a
-- logical standby. During normal operation a SWITCHOVER_STATUS of
-- either SESSIONS ACTIVE or TO STANDBY is acceptable.

column force_logging format a13 tru
column remote_archive format a14 tru
column dataguard_broker format a16 tru

select force_logging,
       remote_archive,
       supplemental_log_data_pk,
       supplemental_log_data_ui,
       switchover_status,
       dataguard_broker
from v$database;

-- List every archive destination: whether it is enabled, which
-- process services it, whether it is local or remote, and -- for
-- remote destinations -- the current mount ID.

column destination format a35 wrap
column process format a7
column archiver format a8
column ID format 99
column mid format 99

select dest_id "ID",
       destination,
       status,
       target,
       schedule,
       process,
       mountid as mid
from v$archive_dest
order by dest_id;

-- Further destination detail: which transmit/delay/reopen options
-- are in effect. REGISTER indicates whether the archived redo log is
-- registered in the remote destination's control file.

set numwidth 8
column ID format 99

select dest_id "ID",
       archiver,
       transmit_mode,
       affirm,
       async_blocks as async,
       net_timeout as net_time,
       delay_mins as delay,
       reopen_secs as reopen,
       register,
       binding
from v$archive_dest
order by dest_id;

-- Show any error recorded the last time archival to each destination
-- was attempted. A blank ERROR together with a VALID status means the
-- last archival completed correctly.

column error format a55 wrap

select dest_id,
       status,
       error
from v$archive_dest;

-- Check v$dataguard_status (available in 9.2.0 and above) for any
-- error or fatal conditions that have been raised, oldest first.

column message format a80

select message,
       timestamp
from v$dataguard_status
where severity in ('Error', 'Fatal')
order by timestamp;

-- The following query will determine the current sequence number
-- and the last sequence archived. If you are remotely archiving
-- using the LGWR process then the archived sequence should be one
-- higher than the current sequence. If remotely archiving using the
-- ARCH process then the archived sequence should be equal to the
-- current sequence. The applied sequence information is updated at
-- log switch time.

-- Rewritten with ANSI joins and qualified columns: the original
-- comma-join form makes it easy to drop a join predicate, and the
-- unqualified SEQUENCE#/LOG_SEQUENCE references were ambiguous to a
-- reader (SEQUENCE# comes from V$ARCHIVED_LOG, LOG_SEQUENCE from
-- V$ARCHIVE_DEST). Output columns and aliases are unchanged.
select ads.dest_id,
       max(al.sequence#) "Current Sequence",
       max(ad.log_sequence) "Last Archived"
from v$archived_log al
join v$archive_dest ad
  on ad.dest_id = al.dest_id
join v$archive_dest_status ads
  on ads.dest_id = al.dest_id
group by ads.dest_id;

-- Gather as much standby information as the primary can see.
-- Standby redo logs (SRLs) are not supported with logical standby
-- databases before version 10.1.

set numwidth 8
column ID format 99
column "SRLs" format 99
column Active format 99

select dest_id as id,
       database_mode as db_mode,
       recovery_mode,
       protection_mode,
       standby_logfile_count "SRLs",
       standby_logfile_active as ACTIVE,
       archived_seq#
from v$archive_dest_status;

-- Status of the processes involved in shipping redo from this
-- system. Does not include the processes needed to apply redo on
-- the standby.

select process,
       status,
       client_process,
       sequence#
from v$managed_standby;

-- Run on the primary: list any standby redo logs (SRLs) created in
-- preparation for switchover ...

select group#,
       sequence#,
       bytes
from v$standby_log;

-- ... they should match the online redo logs (ORLs) returned below
-- in both number and size.

select group#,
       thread#,
       sequence#,
       bytes,
       archived,
       status
from v$log;

-- Dump every initialization parameter that has been changed from its
-- default value, then close the spool file.

set numwidth 5
column name format a30 tru
column value format a48 wra

select name,
       value
from v$parameter
where isdefault = 'FALSE';

spool off

Wednesday, September 3, 2008

oradebug to suspend and resume another user's session

# You can suspend and resume another user's session using the oradebug command in sqlplus.

select spid, pid from v$process where addr = (select paddr from v$session where sid = 117);

SPID PID
------------ ----------
729304 32

SQL> oradebug setospid 729304
Oracle pid: 32, Unix process pid: 729304, image: oracle@p-test-oranode-30 (TNS V1-V3)
SQL> oradebug suspend
Statement processed.
SQL> oradebug resume
Statement processed.

Tuesday, September 2, 2008

Taking ASM disk groups offline.

# first shutdown the databases on the relevant disk groups.

# on ASM
ALTER DISKGROUP ASM_ORADATA31 DISMOUNT;
ALTER DISKGROUP ASM_ORADATA32 DISMOUNT;

SQL> select name, state from v$asm_diskgroup;

NAME STATE
------------------------------ -----------
ASM_ORADATA11 MOUNTED
ASM_ORADATA12 MOUNTED
ASM_ORADATA13 MOUNTED
ASM_ORADATA32 DISMOUNTED
ASM_ORADATA31 DISMOUNTED
ASM_ORADATA41 MOUNTED
ASM_ORADATA42 MOUNTED

ALTER DISKGROUP ASM_ORADATA31 MOUNT;
ALTER DISKGROUP ASM_ORADATA32 MOUNT;

Working with RMAN stored scripts

LIST SCRIPT NAMES;

PRINT SCRIPT full_backup;

PRINT GLOBAL SCRIPT full_backup TO FILE 'my_script_file.txt';

CREATE SCRIPT full_backup { BACKUP DATABASE PLUS ARCHIVELOG; DELETE OBSOLETE; }

CREATE GLOBAL SCRIPT global_full_backup COMMENT 'use only with ARCHIVELOG mode databases' { BACKUP DATABASE PLUS ARCHIVELOG; DELETE OBSOLETE; }

CREATE SCRIPT full_backup FROM FILE 'my_script_file.txt';

RUN { EXECUTE SCRIPT full_backup; }
RUN { EXECUTE GLOBAL SCRIPT global_full_backup; }

replace script delete_sl02sbt_archives
{ delete noprompt force archivelog LIKE '%sl02sbt%' completed before "sysdate-2"; }

IP address exclusion rules in sqlnet.ora

$ORACLE_HOME/network/admin/sqlnet.ora

tcp.validnode_checking = YES
tcp.invited_nodes = {list of IP addresses}
tcp.excluded_nodes = {list of IP addresses}

You cannot specify an IP address range, a wildcard, a partial IP address, or a subnet mask.
You must put all invited nodes in one line; likewise for excluded nodes.
You should always enter localhost as an invited node.
TCP.INVITED_NODES takes precedence over TCP.EXCLUDED_NODES when both are set.

Monday, September 1, 2008

Trigger events

DML_event_clause

DELETE

Specify DELETE if you want the database to fire the trigger whenever a DELETE statement removes a row from the table or removes an element from a nested table.


INSERT

Specify INSERT if you want the database to fire the trigger whenever an INSERT statement adds a row to a table or adds an element to a nested table.


UPDATE

Specify UPDATE if you want the database to fire the trigger whenever an UPDATE statement changes a value in one of the columns specified after OF. If you omit OF, then the database fires the trigger whenever an UPDATE statement changes a value in any column of the table or nested table.

For an UPDATE trigger, you can specify object type, varray, and REF columns after OF to indicate that the trigger should be fired whenever an UPDATE statement changes a value in one of the columns. However, you cannot change the values of these columns in the body of the trigger itself.

ddl_event

ALTER

Specify ALTER to fire the trigger whenever an ALTER statement modifies a database object in the data dictionary.

ANALYZE

Specify ANALYZE to fire the trigger whenever the database collects or deletes statistics or validates the structure of a database object.


ASSOCIATE STATISTICS

Specify ASSOCIATE STATISTICS to fire the trigger whenever the database associates a statistics type with a database object.


AUDIT

Specify AUDIT to fire the trigger whenever the database tracks the occurrence of a SQL statement or tracks operations on a schema object.


COMMENT

Specify COMMENT to fire the trigger whenever a comment on a database object is added to the data dictionary.


CREATE

Specify CREATE to fire the trigger whenever a CREATE statement adds a new database object to the data dictionary.


DISASSOCIATE STATISTICS

Specify DISASSOCIATE STATISTICS to fire the trigger whenever the database disassociates a statistics type from a database object.


DROP

Specify DROP to fire the trigger whenever a DROP statement removes a database object from the data dictionary.


GRANT

Specify GRANT to fire the trigger whenever a user grants system privileges or roles or object privileges to another user or to a role.


NOAUDIT

Specify NOAUDIT to fire the trigger whenever a NOAUDIT statement instructs the database to stop tracking a SQL statement or operations on a schema object.


RENAME

Specify RENAME to fire the trigger whenever a RENAME statement changes the name of a database object.


REVOKE

Specify REVOKE to fire the trigger whenever a REVOKE statement removes system privileges or roles or object privileges from a user or role.


TRUNCATE

Specify TRUNCATE to fire the trigger whenever a TRUNCATE statement removes the rows from a table or cluster and resets its storage characteristics.


DDL

Specify DDL to fire the trigger whenever any of the preceding DDL statements is issued.

database_event

SERVERERROR

Specify SERVERERROR to fire the trigger whenever a server error message is logged.

The following errors do not cause a SERVERERROR trigger to fire:

  • ORA-01403: no data found

  • ORA-01422: exact fetch returns more than requested number of rows

  • ORA-01423: error encountered while checking for extra rows in exact fetch

  • ORA-01034: ORACLE not available

  • ORA-04030: out of process memory when trying to allocate string bytes (string, string)


LOGON

Specify LOGON to fire the trigger whenever a client application logs onto the database.


LOGOFF

Specify LOGOFF to fire the trigger whenever a client application logs off the database.


STARTUP

Specify STARTUP to fire the trigger whenever the database is opened.


SHUTDOWN

Specify SHUTDOWN to fire the trigger whenever an instance of the database is shut down.


SUSPEND

Specify SUSPEND to fire the trigger whenever a server error causes a transaction to be suspended.

DB_ROLE_CHANGE

In a Data Guard configuration, specify DB_ROLE_CHANGE to fire the trigger whenever a role change occurs from standby to primary or from primary to standby.