Table of Contents

Download java grants / permissions so they can be reissued after a database duplication

Ref Backup & Restore Java Privileges

# Timestamp so the export file name is unique per day
TS=$(date '+%Y%m%d')

# Remove any previous export for this SID/day before regenerating it
rm -f "${DATA_PUMP_DIR}/export_${TO_SID}_JAVA_${TS}.sql"

# Spool ready-to-run 'exec dbms_java.grant_...' calls for every non-default
# grantee so java privileges can be replayed after a database duplication.
# The here-doc delimiter is quoted ('EOSQL') so the $ signs in the SQL reach
# sqlplus untouched.
# NOTE(review): DATA_PUMP_DIR and TO_SID must be set by the caller - confirm.
sqlplus -s / as sysdba<<'EOSQL' >"${DATA_PUMP_DIR}/export_${TO_SID}_JAVA_${TS}.sql" 2>"/tmp/results.synchro.$$"
set echo off feed off newpa none head off lines 1000 pages 0 trims on
column stmt format a500 word_wrapped
select 'exec '||stmt||''');'
from   (
       select seq
       ,      'dbms_java.grant_permission('''||grantee||''','''|| type_schema||':'||type_name||''','''||name||''','''||action stmt
       from   dba_java_policy
       where  grantee not in ('JAVADEBUGPRIV','JAVASYSPRIV','JAVAUSERPRIV','JAVA_ADMIN','JAVA_DEPLOY','SYS','PUBLIC','EJBCLIENT','SYSTEM','ORDSYS','MDSYS','JMXSERVER','DBJAVASCRIPT')
       and    type_name != 'oracle.aurora.rdbms.security.PolicyTablePermission'
       union  all
       select seq
       ,      'dbms_java.grant_policy_permission('''||a.grantee||''','''|| u.name||''','''||permission||''','''||action       stmt
       from   sys.user$ u
       ,      (
              select seq                                                                seq
              ,      grantee                                                            grantee
              ,      to_number(substr(name,1,instr(name,':')-1))                        userid
              ,      substr(name, instr(name,':')+1, instr(name,'#')-instr(name,':')-1) permission
              ,      substr(name,instr(name,'#')+1)                                     action
              from   dba_java_policy
              where  grantee not in ('JAVADEBUGPRIV','JAVASYSPRIV','JAVAUSERPRIV','JAVA_ADMIN','JAVA_DEPLOY','SYS','PUBLIC','EJBCLIENT','SYSTEM','ORDSYS','MDSYS','JMXSERVER','DBJAVASCRIPT')
              and    type_name = 'oracle.aurora.rdbms.security.PolicyTablePermission'
              ) a
       where  u.user# = userid
       )
order  by seq
/
EOSQL

Use wget to download patches etc. from Oracle

Using MOS / Metalink credentials, wget can be used from the command line to retrieve files from Oracle rather than use a browser and then ftp them

#!/bin/sh
# Download patch files from Oracle updates.oracle.com using MOS / SSO
# credentials, logging everything to a timestamped logfile.

#
# Generated 6/21/20 4:57 PM
# Start of user configurable variables
#
LANG=C
export LANG

# Trap to cleanup cookie file in case of unexpected exits.
trap 'rm -f "$COOKIE_FILE"; exit 1' 1 2 3 6

# SSO username / password. Terminal echo is disabled while the password is
# typed so it is not displayed or left in the scrollback.
printf 'SSO UserName:'
read SSO_USERNAME
printf 'SSO Password:'
stty -echo 2>/dev/null
read SSO_PASSWORD
stty echo 2>/dev/null
printf '\n'

# Path to wget command
WGET=/usr/local/bin/wget

# Log directory and file
LOGDIR=.
LOGFILE=$LOGDIR/wgetlog-$(date +%m-%d-%y-%H:%M).log

# Print wget version info
echo "Wget version info:
------------------------------
$($WGET -V)
------------------------------" > "$LOGFILE" 2>&1

# Location of cookie file (mktemp writes nothing to stdout on success, so no
# redirection is needed on the assignment)
COOKIE_FILE=$(mktemp -t wget_sh_XXXXXX)
if [ $? -ne 0 ] || [ -z "$COOKIE_FILE" ]
then
 echo "Temporary cookie file creation failed. See $LOGFILE for more details." |  tee -a "$LOGFILE"
 exit 1
fi
echo "Created temporary cookie file $COOKIE_FILE" >> "$LOGFILE"

# Output directory and file
OUTPUT_DIR=.

#
# End of user configurable variable
#

# Authenticate over HTTPS only: the previous plain-http URL sent the SSO
# credentials in clear text. This requires a wget compiled with OpenSSL.
# NOTE(review): the password is still visible in 'ps' output while wget runs;
# switch to --ask-password if that is a concern.
 $WGET  --secure-protocol=auto --save-cookies="$COOKIE_FILE" --keep-session-cookies  --http-user "$SSO_USERNAME" --http-password="$SSO_PASSWORD" "https://updates.oracle.com/Orion/Services/download" -O /dev/null 2>> "$LOGFILE"

# Verify if authentication is successful
if [ $? -ne 0 ]
then
 echo "Authentication failed with the given credentials." | tee -a "$LOGFILE"
 echo "Please check logfile: $LOGFILE for more details."
else
 echo "Authentication is successful. Proceeding with downloads..." >> "$LOGFILE"

 $WGET  --load-cookies="$COOKIE_FILE" --save-cookies="$COOKIE_FILE" --keep-session-cookies "https://updates.oracle.com/Orion/Services/download/p29202461_195000DBRU_AIX64-5L.zip?aru=23226670&patch_file=p29202461_195000DBRU_AIX64-5L.zip" -O "$OUTPUT_DIR/p29202461_195000DBRU_AIX64-5L.zip"   >> "$LOGFILE" 2>&1

 $WGET  --load-cookies="$COOKIE_FILE" "https://updates.oracle.com/Orion/Services/download/p30125133_190000_AIX64-5L.zip?aru=23151934&patch_file=p30125133_190000_AIX64-5L.zip" -O "$OUTPUT_DIR/p30125133_190000_AIX64-5L.zip"   >> "$LOGFILE" 2>&1

fi

# Cleanup
rm -f "$COOKIE_FILE"
echo "Removed temporary cookie file $COOKIE_FILE" >> "$LOGFILE"

SQL

List all instances running on a server

Sounds simple enough and indeed pre 12.1, it was as easy as

RUNNING_SIDS="$(ps -efo args|grep '[p]mon'|awk -F'ora_pmon_' '! /\+ASM|^$/ {print $NF}'|sort -fu)"

but now we have the added complication of CDB/PDB (container / pluggable databases) so a fair bit more work is needed!

#!/bin/ksh
# ==============================================================================
# Name         : list_server_db
# Description  : List all running databases found on a server
# 
# Parameters   : none
# 
# Notes        : none
# 
# Modification History
# ====================
# When      Who               What
# ========= ================= ==================================================
# 21-DEC-16 Stuart Barkley    Created
# ==============================================================================
# The grep pattern must be '[p]mon' (quoted, single brackets) so it matches the
# pmon processes without matching the grep itself; the original '[[p]]mon'
# matched nothing. Split on the full 'ora_pmon_' prefix rather than '_' so
# SIDs that contain underscores survive intact, and so +ASM (asm_pmon_...)
# is skipped because it has no such prefix.
for db in $(ps -ef | grep '[p]mon' | awk -F'ora_pmon_' 'NF>1 {print $NF}')
do
    export ORACLE_SID=${db}
    ORAENV_ASK=NO
    . oraenv >/dev/null 2>&1
    sqlplus -s / as sysdba<<EOSQL
set serveroutput on feed off pages 0 head off newpa none
declare
    e_col_not_found exception;
    e_not_mounted   exception;
    pragma          exception_init(e_not_mounted, -01507);
    pragma          exception_init(e_col_not_found, -904);
    l_db_name       varchar2(20);
    l_database_role varchar2(20);
    l_cdb           varchar2(20);
    l_status        varchar2(20);
    type varchar2_tab is table of varchar2(240) index by binary_integer;
    t_pdbs          varchar2_tab;
begin
    begin
        dbms_output.enable(null);
        execute immediate 'select name, database_role, cdb from v\$database' into l_db_name, l_database_role, l_cdb;
        if l_cdb = 'YES' then
            l_status := 'CDB';
            execute immediate 'select name from v\$pdbs' bulk collect into t_pdbs;
            for i in nvl(t_pdbs.first,1) .. nvl(t_pdbs.last,0)
            loop
                dbms_output.put_line (t_pdbs(i)||':PDB');
            end loop;
        else
            l_status := 'Independent';  -- col CDB exists but this is not one
        end if;
        if l_database_role like '%STANDBY%' then
            l_status := 'Standby';
        end if;
    exception
    when e_not_mounted then
        l_status := 'Not Mounted';
    when e_col_not_found then
        l_status := 'pre12';
    end;

    dbms_output.put_line ('${db}'||':'||l_status);

end;
/
EOSQL
done

Run a command or script on all databases in tnsnames.ora

Very handy if you have a global tnsnames.ora accessible to all clients. So from a client install, this has access to all databases. Any files created can be created on the client so no need to scp to all the machines to recover spool files etc. Downside: Cannot connect as sysdba across tns so password will need to be known for whatever user is used.

#!/bin/ksh
# Run ReviewLite against every SID/SERVICE_NAME found in a local tnsnames.ora.
# A directory whose name contains "_<SID>" is taken as evidence that the SID
# has already been processed, so it is skipped on re-runs.
TNSNAMES="./tnsnames.ora"
[[ ! -r "${TNSNAMES}" ]] && echo "${TNSNAMES} is not accessible" && exit 1

# Pull every SID= / SERVICE_NAME= value out of the file, de-duplicated
SIDLIST=$(perl -ne 'print "$1\n" if /(?:SID|SERVICE_NAME)\s*=\s*(\S+?)\)/' ${TNSNAMES}|sort|uniq|xargs)
for SID in ${SIDLIST}
do
    echo "Processing $SID"
    # look for an existing directory whose name contains _<SID>
    ls -al | grep "^d" | grep "\_${SID}" >/dev/null
    if [[ $? -ne 0 ]]; then
        # NOTE(review): "xxxxxx" is a password placeholder - sysdba is not
        # possible over TNS, so a known account/password must be supplied
        echo 'exit' | sqlplus system/xxxxxx@${SID} @ReviewLite17.1.sql >/dev/null 2>&1
    else
        echo "directory already exists so must have done this SID"
    fi
done

Run a command or script on all databases in oratab

#!/usr/bin/ksh
# ==============================================================================
# Name         : all_db_do
# Description  : Loops over all the databases in oratab and executes the
#                specified SQL command - be careful!
#
# Parameters   : -v(erbose)
#                -q(uiet) - just the results
#                -i(nstance list) - if provided, only run on these instances
#                -f(ilename) | <quoted SQL command to be executed>]
#
# Examples     :
#        all_db_do -v 'alter system switch logfile;'
#        all_db_do -f sessions.sql
#        all_db_do -q -i "orgt rpad reportt" 'select open_mode from v$database;'
#        all_db_do -v "alter system set local_listener='LISTENER_&SID';"
#        all_db_do "create directory data_pump_dir as '/oracle/export/&SID';"
#
# Notes        : If -b is specified to run on all databases concurrently, a
#                logfile is created in the form /tmp/results_all_db_do_<SID>.log
#
# Modification History
# ====================
# When      Who               What
# ========= ================= ==================================================
# 14-JAN-13 Stuart Barkley    Created
# 25-FEB-13 Stuart Barkley    Adapt for Solaris
# 21-MAR-13 Stuart Barkley    Check db is running first
# 29-APR-13 Stuart Barkley    Added verbose flag processing
# 24-MAY-13 Stuart Barkley    Wrap / as sysdba in quotes for older versions
# 23-JUN-16 Stuart Barkley    Add -f to run a file containing sql
# 23-SEP-21 Stuart Barkley    Add -b to run stuff in the background
# ==============================================================================

PROGNAME=$(basename $0)   # used for the per-SID background logfile names
OS=$(uname -s)

# On Solaris the default awk/grep are pre-POSIX; use the XPG4 versions instead
AWK=$(which awk)
GREP=$(which grep)
if [[ "${OS}" == "SunOS" ]]; then
    AWK=/usr/xpg4/bin/awk
    GREP=/usr/xpg4/bin/grep
fi

# A readable oratab is required to discover the instances to operate on
if [[ ! -r /etc/oratab ]]; then
    if [[ "${OS}" == "SunOS" ]]; then
        echo "oratab is not where we want it. Please run 'ln -s /var/opt/oracle/oratab /etc/oratab' as root and retry" && exit 1
    else
        echo "Either Oracle is not installed or database is running without an oratab" && exit 1
    fi
fi

# -------------------------
# get the arguments, if any
# -------------------------
unset BACKGROUND VERBOSE SIDLIST
BACKGROUND=false
while getopts "btvqf:i:" flag
do
    case "${flag}" in
    b) BACKGROUND=true;;
    t) TNS='@${ORACLE_SID}';;
    v) VERBOSE=1;;
    q) QUIET="feed off verif off head off newpa none";;
    f) FILE2RUN=${OPTARG};;
    i) SIDLIST=${OPTARG};;
    *) echo "ERROR: Unrecognised flag, ${flag}" && exit 1;;
    esac
done
shift $((OPTIND-1))

# ----------------------------------------------------------------------------------
# if no instances supplied, take all those where the restart parameter is set to "Y"
# ----------------------------------------------------------------------------------
# An oratab entry is SID:ORACLE_HOME:Y|N - three colon-separated fields, so the
# test must be NF>=3 (the previous NF>3 silently matched nothing, leaving
# SIDLIST empty). Use ${AWK} so the XPG4 awk is picked up on Solaris.
if [[ "${SIDLIST}" == "" ]]; then
    SIDLIST="$(${AWK} -F':' 'NF>=3 && $3=="Y" && $1 !~/[#*]/ {print $1}' /etc/oratab)"
fi

# Print usage and exit - the same text was previously duplicated three times
usage() {
    echo "Usage: $0 [-v] [-q] [-f <filename> | <quoted SQL command to be executed>]"
    echo "eg: $0 -f sessions.sql"
    echo "eg: $0 -v 'alter system switch logfile;'"
    exit 1
}

if [[ -z ${FILE2RUN} ]]; then
    # no file given, so a quoted SQL command must be supplied as $1
    [[ -z $1 ]] && usage
    SQL=$1
elif [[ -s ${FILE2RUN} ]]; then
    # file supplied and is non-empty - run it with the sqlplus @ syntax
    SQL="@${FILE2RUN}"
else
    # file supplied but empty or missing
    echo "File ${FILE2RUN} is empty or does not exist"
    usage
fi

# make sure oraenv and friends can be found
echo "${PATH}"|grep '/usr/local/bin' >/dev/null 2>&1
[[ $? -ne 0 ]] && PATH="/usr/local/bin:${PATH}"

ORAENV_ASK=NO
for db in ${SIDLIST}
do
    # skip instances that are not running - unless the command is "startup"
    if ( [[ $1 != "startup" ]] && [[ $(ps -ef | ${GREP} -c -E "[o]ra_pmon_${db}$") -eq 0 ]] ); then
        [[ ! -z "$VERBOSE" ]] && echo "$db is not running, skipping..."
        continue
    fi
    echo
    [[ ! -z "$VERBOSE" ]] && printf "\n\n%s %s %s\n" "================ Start on ${db} at" "$(date '+%d-%b-%Y %H:%M:%S')" "================"
    export ORACLE_SID=$db
    . oraenv >/dev/null
    [[ $? -ne 0 ]] && echo "ERROR: Unable to set environment for ${ORACLE_SID}, skipping" && continue
    # -t support: getopts stored the literal text '@${ORACLE_SID}' in TNS
    # (single quotes suppress expansion at parse time, and the shell never
    # re-expands variable contents), so build the real connect string here,
    # now that ORACLE_SID is known for this iteration.
    CONNECT="/"
    [[ -n "${TNS}" ]] && CONNECT="/@${ORACLE_SID}"
    case "${BACKGROUND}" in
    (false)
        "${ORACLE_HOME}/bin/sqlplus" -s "${CONNECT} as sysdba" <<EOSQL
        define SID="${ORACLE_SID}"
        col comp_name format a50
        col value for a40
        col filepath for a200
        set lines 1000 pages 100 $QUIET
        $SQL
EOSQL
        true
        ;;
    (true)
        # -b: run sqlplus in the background, output to a per-SID logfile
        "${ORACLE_HOME}/bin/sqlplus" -s "${CONNECT} as sysdba" <<EOSQL >>/tmp/results_${PROGNAME}_${db}.log &
        define SID="${ORACLE_SID}"
        col comp_name format a50
        col value for a40
        col filepath for a200
        set lines 1000 pages 100 $QUIET
        $SQL
EOSQL
        true
        ;;
    (*)
        echo "ERROR: Unexpected value for BACKGROUND [${BACKGROUND}]. Please report this, it is a scripting error."
        false
        ;;
    esac
    [[ ! -z "$VERBOSE" ]] && printf "%s %s %s\n\n" "================   End on ${db} at" "$(date '+%d-%b-%Y %H:%M:%S')" "================"
done
# wait for any background (-b) sqlplus sessions to finish before reporting
wait
echo "INFO: All done"
all_db_do "create or replace directory data_pump_dir as '/oracle/export/&SID';"

all_db_do "archive log list"

all_db_do "shutdown immediate"

all_db_do 'alter system set log_archive_dest_1="location=/oracle/arch/&SID" scope=both;'

Run an SQL command on behalf of another user

It can be used to execute any sql command as a specified user provided this procedure is created in sys schema.

set serveroutput on
-- Execute an arbitrary SQL statement on behalf of another user.
-- Must be created in the SYS schema (DBMS_SYS_SQL is SYS-owned).
--   p_schema_name : user to impersonate (must exist in DBA_USERS)
--   p_sql         : the statement to parse and execute as that user
create or replace procedure run_sql_as_user ( p_schema_name in varchar2
                                            , p_sql         in varchar2
                                            ) is
    l_user_id dba_users.user_id%type;
    l_cursor  number;
    l_rc      number;
begin
    dbms_output.enable(null);
    begin
        select u.user_id
        into   l_user_id
        from   dba_users u
        where  u.username = p_schema_name;
    exception
    when no_data_found then
        raise_application_error (-20001, 'Cannot find user '||p_schema_name);
    end;
    l_cursor := dbms_sys_sql.open_cursor;
    begin
        dbms_sys_sql.parse_as_user ( c             => l_cursor
                                   , statement     => p_sql
                                   , language_flag => dbms_sql.native
                                   , userid        => l_user_id
        );
        l_rc := dbms_sys_sql.execute(l_cursor);
        dbms_sys_sql.close_cursor(l_cursor);
    exception
    when others then
        -- close the cursor before re-raising so a failed parse/execute
        -- does not leak an open cursor (the original skipped this)
        dbms_sys_sql.close_cursor(l_cursor);
        raise_application_error (-20001, 'ERROR: '||sqlerrm);
    end;
end;
/

Run a script on all databases listed in tnsnames.ora

Can run an SQL command file on all SID/SERVICE found in a tnsnames.ora file. I use this from a client laptop having a global tns file on it. This way it's not necessary to copy the file onto every host before executing it. Any files created by the script will be created on the client.

#!/bin/ksh
# Run ReviewLite against every SID/SERVICE_NAME found in a local tnsnames.ora
# from a client machine, so output files land on the client. A directory whose
# name contains "_<SID>" marks that SID as already processed.

TNSNAMES="./tnsnames.ora"

[[ ! -r "${TNSNAMES}" ]] && echo "${TNSNAMES} is not accessible" && exit 1

# Pull every SID= / SERVICE_NAME= value out of the file, de-duplicated
SIDLIST=$(perl -ne 'print "$1\n" if /(?:SID|SERVICE_NAME)\s*=\s*(\S+?)\)/' ${TNSNAMES}|sort|uniq|xargs)
for SID in ${SIDLIST}
do
    echo "Processing $SID"
    # look for an existing directory whose name contains _<SID>
    ls -al | grep "^d" | grep "\_${SID}" >/dev/null
    if [[ $? -ne 0 ]]; then
        # NOTE(review): "password" is a placeholder - sysdba is not possible
        # over TNS, so a real account/password must be substituted here
        echo 'exit' | sqlplus system/password@${SID} @ReviewLite17.1.sql >/dev/null 2>&1
    else
        echo "directory already exists so must have done this SID"
    fi
done

List invalid objects

 set lines 200
 set pages 200
 col obj format a40
 select owner||'.'||object_name obj
 ,      object_type
 from   dba_objects
 where  1=1
 and    status = 'INVALID';

Recompile all invalid objects

@?/rdbms/admin/utlrp.sql

See how much space is left in the flash recovery area (FRA)

select name
,      to_char (space_limit, '999,999,999,999') as space_limit
,      to_char (space_limit - space_used + space_reclaimable
,               '999,999,999,999')
           as space_available
,      round ( (space_used - space_reclaimable) / space_limit * 100, 1)
           as pct_full
from   v$recovery_file_dest;

or

# ==============================================
# Check FRA to see how much space is reclaimable
# ==============================================
# ==============================================
# Check FRA to see how much space is reclaimable
# ==============================================
# Appends one summary line for v$recovery_file_dest to the monthly RMAN log.
# NOTE(review): relies on RMAN_LogDir, V_SCRIPT and ORACLE_SID being set by
# the caller - confirm in the surrounding script.
fra_reclaim_check() {

LOGFILE=${RMAN_LogDir}/${ORACLE_SID}_$(date '+%Y%m')_${V_SCRIPT}.log
# 'verify' was misspelt 'vefify' (sqlplus rejects it with SP2-0735) and
# 'echo off' was given twice
sqlplus "/ as sysdba" <<EOSQL > /tmp/results.$$
set verify off echo off heading off newpage none linesize 1000
select 'Filesystem:'||name||
       '; Total(Gb):'||round((space_limit/1073741824),2) ||
       '; Reclaimable(Gb):'||round((space_reclaimable/1073741824),2) ||
       '; Reclaimable(%):'||round( ((space_reclaimable/1073741824)/(space_limit/1073741824)*100),2 )
from   v\$recovery_file_dest
/
EOSQL
cat /tmp/results.$$ >> "$LOGFILE"
rm -f /tmp/results.$$

}

How far back can we flashback?

col time_now              for a25
col time_oldest_flashback for a25
select to_char(sysdate,'DD-MON-YY HH24:MI')                    time_now
,      to_char(fdl.oldest_flashback_time, 'DD-MON-YY HH24:MI') time_oldest_flashback
,      round((sysdate-fdl.oldest_flashback_time)*24,1)         hours_flashback_possible
from   v$database d
,      v$flashback_database_log fdl
/

DBA privs tables

 DBA_AQ_AGENT_PRIVS
 DBA_COL_PRIVS
 DBA_PRIV_AUDIT_OPTS
 DBA_REPGROUP_PRIVILEGES
 DBA_ROLE_PRIVS
 DBA_RSRC_CONSUMER_GROUP_PRIVS
 DBA_RSRC_MANAGER_SYSTEM_PRIVS
 DBA_SYS_PRIVS
 DBA_TAB_PRIVS
 DBA_WM_SYS_PRIVS
 DBA_WORKSPACE_PRIVS

Schedule a shell job

 begin
     dbms_scheduler.create_program ( program_name   => 'myshelljob'
                                   , program_action => '/home/oracle/scripts/shell.sh'
                                   , program_type   => 'EXECUTABLE'
                                   , comments       => 'Run a shell'
                                   , enabled        => TRUE
                                   );
 end;
 /

Start the job

 begin
     -- fixed: 'dbms_sheduler' typo, start_date left without a value, and
     -- the missing END; before the slash.
     -- NOTE(review): if ORA-27477 is raised, the job cannot share the name
     -- of the existing program 'myshelljob' - rename the job.
     dbms_scheduler.create_job ( job_name     => 'myshelljob'
                               , program_name => 'myshelljob'
                               , start_date   => systimestamp
                               , enabled      => true
                               );
 end;
 /

What SQL statement is a user running?

select a.sid
,      a.serial#
,      b.sql_text
from   v$session a
,      v$sqlarea b
where  a.sql_address = b.address
and    a.username    = '&username'
/

See what SQL statements all users are running

Useful to see what is happening when process limit is reached

set lines 300 pages 1000

col username for a10
col command for 999 head "CMD"
col lockwait for a10
col status for a8
col schemaname for a12
col osuser for a10
col process for a10
col program for a20
col machine for a10
col action for a10
col module for a10
col identifier for a10
col event for a27
col state for a8
col service_name for a10
col serial# for 999999

select a.username
,      a.command
,      a.lockwait
,      a.status
,      a.schemaname
,      a.osuser
,      a.process
,      a.machine
,      a.program
,      a.type
--,      a.sql_exec_start
--,      a.plsql_object_id
--,      a.plsql_subprogram_id
--,      a.module
--,      a.action
,      a.logon_time
,      a.event
,      a.wait_time
,      a.seconds_in_wait
,      a.state
--,      a.service_name
,      a.serial#
,      b.sql_text
from   v$session a
,      v$sqlarea b
where  a.sql_address = b.address
order  by a.logon_time desc
/


set lines 80

A logminer session

Mine all SQL statements in a 10 minute period

sqlplus / as sysdba
alter session set nls_date_format = 'DD-MON-YYYY HH24:MI:SS';
execute dbms_logmnr.start_logmnr ( starttime => '27-OCT-2009 11:05:00'
                                 , endtime   => '27-OCT-2009 11:15:00'
                                 , options   => dbms_logmnr.dict_from_online_catalog+dbms_logmnr.continuous_mine
                                 );
create table mycontents as select * from v$logmnr_contents;
execute dbms_logmnr.end_logmnr();

select * from mycontents;

drop table mycontents;

Huge Pages

#!/bin/bash
# 
# hugepages_settings.sh
# 
# Linux bash script to compute values for the
# recommended HugePages/HugeTLB configuration
# 
# Note: This script does calculation for all shared memory
# segments available when the script is run, no matter it
# is an Oracle RDBMS shared memory segment or not.

# Check for the kernel version (major.minor only). The printf format had been
# mangled to a backslash-newline; it must be a literal \n.
KERN=$(uname -r | awk -F. '{ printf("%d.%d\n",$1,$2); }')
# Find out the HugePage size (kB, from /proc/meminfo)
HPG_SZ=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
# Start from 1 page to be on the safe side and guarantee 1 free HugePage
NUM_PG=1
# Cumulative number of pages required to handle the running shared memory
# segments. The grep pattern must be [0-9][0-9]* - the doubled-bracket form
# '[[0-9]][[0-9]]*' matched nothing, so every segment was silently skipped.
for SEG_BYTES in $(ipcs -m | awk '{print $5}' | grep "[0-9][0-9]*")
do
   MIN_PG=$(echo "$SEG_BYTES/($HPG_SZ*1024)" | bc -q)
   if [[ $MIN_PG -gt 0 ]]; then
      NUM_PG=$(echo "$NUM_PG+$MIN_PG+1" | bc -q)
   fi
done
# Finish with results
case $KERN in
   '2.4') HUGETLB_POOL=$(echo "$NUM_PG*$HPG_SZ/1024" | bc -q);
          echo "Recommended setting: vm.hugetlb_pool = $HUGETLB_POOL" ;;
   '2.6' | '3.8') echo "Recommended setting: vm.nr_hugepages = $NUM_PG" ;;
    *) echo "Unrecognized kernel version $KERN. Exiting." ;;
esac
# End

Pretty longops listing

set lines 2000 pages 1000
col opname for a34 head "Job"
col message for a60
col perc_done for a10 head "Done"
col started for a18
col killer for a15
col mins_busy head "Mins busy"
col mins_left head "Mins left"
select sid||','||serial#                       killer
,      opname
,      message
,      round ((sofar/totalwork),4)*100||'%'    perc_done
,      to_char(start_time,'DD-MON-YY HH24:MI') started
,      floor(elapsed_seconds/60)               mins_busy
,      ceil(time_remaining/60)                 mins_left
from   v$session_longops
where  1=1
and    sofar     != totalwork
and    totalwork != 0
and    opname    like 'RMAN%'
/
select * from v$session where module='Data Pump Worker'

or attach to the impdp job and it shows percentage done.

Get the SID from v$session_longops and plug it into v$session to check the SQL command details.

Current Running SQLs
--------------------
set pages 1000 lines 2000
col program  format a40
col sql_text format a130

select s.sid
,      s.status
,      s.last_call_et
,      s.program
,      sa.sql_id
,      sa.sql_text
from   v$session s
,      v$sqlarea sa
where  s.sql_id = sa.sql_id
and    s.sid = '&sid'
/

set pages 1000 lines 2000
col USERNAME for a10
col OSUSER   for a10
col MACHINE  for a10

-- Map a session to its server process. Fixes: "longon_time" was a typo for
-- logon_time (ORA-00904); the join must be session PADDR -> process ADDR
-- (s.addr is the session's own address), matching the other queries in these
-- notes; and a gv$ join needs inst_id on both sides.
select s.sid
,      s.serial#
,      p.spid
,      s.username
,      s.osuser
,      s.status
,      s.process fg_pid
,      s.logon_time
,      s.machine
,      p.spid bg_pid
from   gv$session s
,      gv$process p
where  s.paddr   = p.addr
and    s.inst_id = p.inst_id
and    s.sid     = '&sid'
/

$ps -ef | grep <spid>

set pages 1000 lines 2000
SELECT INST_ID
,      SID
,      SERIAL#
,      SQL_ID
,      USERNAME
,      PROGRAM
,      MACHINE
,      SERVICE_NAME
FROM   GV$SESSION
WHERE  SID IN ('<SID_NO.1>','<SID_NO.2>')
/
Active Running SQLs
--------------------
set pages 1000 lines 2000
col SPID     for a10
col PROGRAM  for a15
col OSUSER   for a10
col ACTION   for a10
col EVENT    for a25
col SQL_TEXT for a25
col MACHINE  for a10
col P1TEXT   for a10
col P2TEXT   for a10
col P3TEXT   for a10
SELECT b.sid
,      b.serial#
,      a.spid
,      b.sql_id
,      b.program
,      b.osuser
,      b.machine
,      b.type
,      b.event
,      b.action
,      b.p1text
,      b.p2text
,      b.p3text
,      b.state
,      c.sql_text
,      b.logon_time
FROM   v$process a
,      v$session b
,      v$sqltext c
WHERE  a.addr           = b.paddr
AND    b.sql_hash_value = c.hash_value
AND    b.STATUS         = 'ACTIVE'
AND    b.sid            = '&sid'
ORDER  BY a.spid
,      c.piece
/

OR - Use the below Query

Longops.sql
-----------
set pages 50000 lines 32767
col OPNAME for a10
col SID form 9999
col SERIAL form 9999999
col PROGRAM for a10
col USERNAME for a10
col SQL_TEXT for a40
col START_TIME for a10
col LAST_UPDATE_TIME for a10
col TARGET for a25
col MESSAGE for a25

alter session set nls_date_format = 'DD-MM-YYYY HH24:MI:SS';

SELECT l.inst_id,l.sid, l.serial#, l.sql_id, l.opname, l.username, l.target, l.sofar, l.totalwork, l.start_time,l.last_update_time,round(l.time_remaining/60,2) "REMAIN MINS", round(l.elapsed_seconds/60,2) "ELAPSED MINS", round((l.time_remaining+l.elapsed_seconds)/60,2) "TOTAL MINS", ROUND(l.SOFAR/l.TOTALWORK*100,2) "%_COMPLETE", l.message,s.sql_text
FROM gv$session_longops l
LEFT OUTER JOIN v$sql s on s.hash_value=l.sql_hash_value and s.address=l.sql_address and s.child_number=0
WHERE l.OPNAME NOT LIKE 'RMAN%' AND l.OPNAME NOT LIKE '%aggregate%' AND l.TOTALWORK != 0 AND l.sofar<>l.totalwork AND l.time_remaining > 0
/

Apply the database patch after the ORACLE_HOME has been patched

# Set the Oracle environment for the SID passed as the first argument
ORAENV_ASK=NO
export ORACLE_SID=$1
. oraenv

# Start the instance, apply the PSU catalog changes, recompile invalid
# objects, then report what was applied. The here-doc is unquoted so the
# backslash in registry\$history is needed to pass a literal $ to sqlplus.
sqlplus /nolog <<EOSQL
connect / as sysdba
startup
@?/rdbms/admin/catbundle.sql psu apply
@?/rdbms/admin/utlrp.sql
purge dba_recyclebin;

prompt check patch apply...
set lines 150
col action_time for a50
col comp_name for a50
col comments for a50

select action_time,action,comments from sys.registry\$history where trunc(action_time) = trunc(sysdate);
select comp_name,version,status,modified from dba_registry;
exit
EOSQL

Show how much archivelog data is generated per day

A companion script for 'how many log switches in a day'

col orderby noprint
select trunc(first_time) orderby
,      to_char(first_time,'DD-MON-YYYY') "Date"
,      round(sum(bytes/1024/1024/1024),2) arch_logs_gigs
from (
    select unique (blocks * block_size) bytes,
    thread#,
    sequence#,
    resetlogs_change#,
    first_time
    from v$archived_log
)
group by trunc(first_time)
,      to_char(first_time, 'DD-MON-YYYY')
order by trunc(first_time) desc
/

Abort a hanging database even when sysdba cannot connect

Unable to connect to database with sqlplus / as sysdba because audit file cannot be created?

Instead of just killing the pmon process, there is a kinder (and more useful) way to kill the database. Use the hidden, undocumented 'prelim' switch parameter to sqlplus to make a connection to the database without creating SGA session data structures. It can be very useful for hang analysis so the possible cause can be found after the database has been restarted.

sqlplus -prelim / as sysdba
oradebug unlimit
oradebug hanganalyze 3
oradebug setmypid              -- or oradebug setorapname diag
oradebug dump ashdumpseconds 30
oradebug dump systemstate 10   -- or oradebug dump systemstate 266
oradebug tracefile_name
shutdown abort

Lists Oracle session with kill session statement all ready to go

disconnect attempts to kill the o/s processes too. kill doesn't.

col killer for a60
col osuser for a12
col logon  for a23
col spid   for a8
col username for a15
col program for a50
set lines 2000
select 'alter system disconnect session '''||s.sid||','||s.serial#||''' immediate;' killer
,      s.inst_id
,      to_char(s.logon_time,'DD-MON-YY HH24:MI:SS') logon
,      sci.osuser
,      p.spid
,      s.username
,      s.program
from   gv$session s
,      gv$session_connect_info sci
,      gv$process p
where  1=1
and    p.addr     = s.paddr
and    p.inst_id  = s.inst_id
and    s.sid      = sci.sid
and    s.serial#  = sci.serial#
and    s.type    != 'BACKGROUND'
and    s.status   = 'INACTIVE'
order  by s.logon_time desc
/


KILLER                                                          INST_ID LOGON                   OSUSER       SPID     USERNAME        PROGRAM
------------------------------------------------------------ ---------- ----------------------- ------------ -------- --------------- --------------------------------------------------
alter system disconnect session '490,33947' immediate;                1 23-JUN-16 13:30:53      oraibm       23205    SYS             sqlplus@benouerp07 (TNS V1-V3)
alter system disconnect session '490,33947' immediate;                1 23-JUN-16 13:30:53      oraibm       23205    SYS             sqlplus@benouerp07 (TNS V1-V3)
alter system disconnect session '490,33947' immediate;                1 23-JUN-16 13:30:53      oraibm       23205    SYS             sqlplus@benouerp07 (TNS V1-V3)
alter system disconnect session '490,33947' immediate;                1 23-JUN-16 13:30:53      oraibm       23205    SYS             sqlplus@benouerp07 (TNS V1-V3)

Create a guaranteed restore point

create restore point beginning guarantee flashback database;

How much space is being used by a guaranteed restore point?

show parameter recovery
set numwi 15
set lines 200
col name for a40
col time for a23
col gigs_limit head "Max (but check df -g also!)"

select name
,      time
,      round(storage_size/1024/1024/1024,2) gigs_used
from   v$restore_point
where  1=1
and    guarantee_flashback_database = 'YES'
/

select * from v$flash_recovery_area_usage
/

select name
,      round(space_used/1024/1024/1024,2) gigs_used
,      round(space_limit/1024/1024/1024,2) gigs_limit
,      space_reclaimable
,      number_of_files
from v$recovery_file_dest
/

Get statistics for LOB columns in a table

select
  max(dbms_lob.getlength(lob_content)) max_len
, avg(dbms_lob.getlength(lob_content)) avg_len
, min(dbms_lob.getlength(lob_content)) min_len
, sum(case when dbms_lob.getlength(lob_content) <= 4000 then 1
else 0 end) "<4k"
, sum(case when dbms_lob.getlength(lob_content) > 4000 then 1
else 0 end) ">4k"
, sum(case when dbms_lob.getlength(lob_content) is null then 1 else
0 end) "is null"
from &tablename
/

badprivs.sql

Check to see if any ordinary users have privileges / access / grants / rights they should not have

select grantee, privilege, admin_option
from   sys.dba_sys_privs
where  (       privilege like '% ANY %'
        or     privilege in ('BECOME USER', 'UNLIMITED TABLESPACE')
        or     admin_option = 'YES'
       )
and    grantee not in ('SYS', 'SYSTEM', 'OUTLN', 'AQ_ADMINISTRATOR_ROLE',
                      'DBA', 'EXP_FULL_DATABASE', 'IMP_FULL_DATABASE',
                      'OEM_MONITOR', 'CTXSYS', 'DBSNMP', 'IFSSYS',
                      'IFSSYS$CM', 'MDSYS', 'ORDPLUGINS', 'ORDSYS',
                      'TIMESERIES_DBA')

Generate a list of tablespaces ready for a migration

set echo off
set lines 300
set pages 0
Set headi off
set feedb off
set long 32000

spool migration_create_target_tablespaces.sql

exec dbms_metadata.set_transform_param(dbms_metadata.session_transform,'SQLTERMINATOR',TRUE);
select DBMS_METADATA.GET_DDL('TABLESPACE',tablespace_name) from dba_tablespaces
/

spool off

List the privileges assigned to users on directories

table_name is the name of the directory…

select grantor
,      grantee
,      table_schema
,      table_name
,      privilege
from   all_tab_privs
where  table_name = 'DATA_PUMP_DIR'
/

List the privileges assigned to users on packages/procedures

table_name is the name of the package…

select grantor
,      grantee
,      table_schema
,      table_name
,      privilege
from   all_tab_privs
where  upper(table_name) like upper('%UTL_FILE%')
/

List users with DBA privilege (role)

all_db_do -q "select vi.instance_name,drp.grantee,drp.admin_option from v\$instance vi,dba_role_privs drp where drp.granted_role='DBA';"

List the system privileges assigned to a user (used to copy user as or clone user as)

Maybe this one is better Extract ddl with "dbms metadata.get ddl"

 SELECT LPAD(' ', 2*level) || granted_role "USER PRIVS"
 FROM (
   SELECT NULL grantee,  username granted_role
   FROM dba_users
   WHERE username LIKE UPPER('%&uname%')
   UNION
   SELECT grantee, granted_role
   FROM dba_role_privs
   UNION
   SELECT grantee, privilege
   FROM dba_sys_privs)
 START WITH grantee IS NULL
 CONNECT BY grantee = prior granted_role;

or

 SELECT path
 FROM (
   SELECT grantee,
          sys_connect_by_path(privilege, ':')||':'||grantee path
   FROM (
     SELECT grantee, privilege, 0 role
     FROM dba_sys_privs
     UNION ALL
     SELECT grantee, granted_role, 1 role
     FROM dba_role_privs)
   CONNECT BY privilege=prior grantee
   START WITH role = 0)
 WHERE grantee IN (
    SELECT username
    FROM dba_users
    WHERE lock_date IS NULL
    AND password != 'EXTERNAL'
    AND username != 'SYS')
 OR grantee='PUBLIC'

or

for a migration from 10g…

set headi off
set feedb off
set long  9999999
set pages 0
set lines 300

spool migration_create_target_users_grants.sql

select dbms_metadata.get_ddl('USER', username) || ';' ddl
from dba_users
where  1=1
and    username like upper('%&&USER_NAME%')
/

select 'grant '||privilege|| ' to ' || grantee || ';'
from   dba_sys_privs
where  1=1
and    grantee like upper('%&&USER_NAME%')
/

select 'grant '||granted_role|| ' to ' || grantee || ';'
from   dba_role_privs
where  1=1
and    grantee like upper('%&&USER_NAME%')
/

spool off

or for a migration on 11g…!

set head off
set pages 0
set long 9999999
col ddl for a1000


SELECT DBMS_METADATA.GET_DDL('USER', USERNAME) || ';' DDL
FROM DBA_USERS
where  1=1
and    default_tablespace not in ('SYSTEM','SYSAUX')
and    upper(username) like '%'||upper('&&username')||'%'
UNION ALL
SELECT DBMS_METADATA.GET_GRANTED_DDL('ROLE_GRANT', USERNAME) || ';' DDL
FROM DBA_USERS
where  1=1
and    default_tablespace not in ('SYSTEM','SYSAUX')
and    upper(username) like '%'||upper('&&username')||'%'
UNION ALL
SELECT DBMS_METADATA.GET_GRANTED_DDL('SYSTEM_GRANT', USERNAME) || ';' DDL
FROM DBA_USERS
where  1=1
and    default_tablespace not in ('SYSTEM','SYSAUX')
and    upper(username) like '%'||upper('&&username')||'%'
UNION ALL
SELECT DBMS_METADATA.GET_GRANTED_DDL('OBJECT_GRANT', USERNAME) || ';' DDL
FROM DBA_USERS
where  1=1
and    default_tablespace not in ('SYSTEM','SYSAUX')
and    upper(username) like '%'||upper('&&username')||'%'

spool migration_create_users_grants.sql
/
spool off

Move datafiles between filesystems

Also used to rename a datafile or tempfile using the traditional method

Check the status of the datafiles

set lines 1000 pages 100
col file_name for a70
select file_id, file_name, status, online_status from dba_data_files where tablespace_name = 'TS_THALER_DATA' order by 1;

or

Check the status of the tempfiles

set lines 1000 pages 100
col file_name for a70
select v.file#, t.file_name, v.status from dba_temp_files t, v$tempfile v where t.file_id = v.file# order by 1;

Set the relevant datafile offline

alter database datafile 125 offline;
alter database datafile 126 offline;
alter database datafile 127 offline;

Rename the files on the filesystem using o/s commands

This has to be done before the alter otherwise the alter will fail (it checks that the destination file exists)

alter tablespace ts_thaler_data offline;

host mv '/cln/tst/ora_data1/clnt/data4/ts_thaler_data_113.dbf' '/cln/tst/ora_data1/clnt/data1/ts_thaler_data_113.dbf'
host mv '/cln/tst/ora_data1/clnt/data4/ts_thaler_data_114.dbf' '/cln/tst/ora_data1/clnt/data1/ts_thaler_data_114.dbf'
host mv '/cln/tst/ora_data1/clnt/data4/ts_thaler_data_115.dbf' '/cln/tst/ora_data1/clnt/data1/ts_thaler_data_115.dbf'

Run the relevant alter commands to inform the control files of the new locations

alter database rename file '/cln/tst/ora_data1/clnt/data4/ts_thaler_data_113.dbf' to '/cln/tst/ora_data1/clnt/data1/ts_thaler_data_113.dbf';
alter database rename file '/cln/tst/ora_data1/clnt/data4/ts_thaler_data_114.dbf' to '/cln/tst/ora_data1/clnt/data1/ts_thaler_data_114.dbf';
alter database rename file '/cln/tst/ora_data1/clnt/data4/ts_thaler_data_115.dbf' to '/cln/tst/ora_data1/clnt/data1/ts_thaler_data_115.dbf';

Recover the datafiles

This step is skipped when renaming tempfiles

recover datafile 125;
recover datafile 126;
recover datafile 127;

Set the relevant datafile online

alter database datafile 125 online;
alter database datafile 126 online;
alter database datafile 127 online;

Rename a datafile on 12c and higher

If you're lucky enough to be on 12c or higher it's one command - no need to offline the datafile!

SQL> alter database move datafile '/oracle/ora_data2/prod/data4/ts_thaler_data_113.dbf' to '/oracle/ora_data2/prod/data3/ts_thaler_data_113.dbf';

Database altered.

It seems this method cannot be used for tempfiles - even though the error message suggests the database is aware of it! Have to use the traditional method above.

SQL> alter database move tempfile '/cln/acc/ora_data3/amla/temp_system/temp_system01.dbf' to '/cln/acc/ora_data3/amla/temp_system/temp_system_01.dbf';
alter database move tempfile '/cln/acc/ora_data3/amla/temp_system/temp_system01.dbf' to '/cln/acc/ora_data3/amla/temp_system/temp_system_01.dbf'
                    *
ERROR at line 1:
ORA-00905: missing keyword


SQL> alter database move datafile '/cln/acc/ora_data3/amla/temp_system/temp_system01.dbf' to '/cln/acc/ora_data3/amla/temp_system/temp_system_01.dbf';
alter database move datafile '/cln/acc/ora_data3/amla/temp_system/temp_system01.dbf' to '/cln/acc/ora_data3/amla/temp_system/temp_system_01.dbf'
*
ERROR at line 1:
ORA-01516: nonexistent log file, data file, or temporary file
"/cln/acc/ora_data3/amla/temp_system/temp_system01.dbf"

Pass parameter/argument into perl one-liner script from shell

Trick? Just use ARGV to pass them in…

FREQ=$(perl -e 'my @CAL=split(";",$ARGV[0]);shift @CAL;print $CAL[$ARGV[1]];' -- "$LINE" "$offset")

or

DUMP_DATE=$(perl -MTime::Local -e 'print(timelocal(0,$ARGV[0],$ARGV[1],$ARGV[2],$ARGV[3],$ARGV[4]))' -- "$MI" "$HH" "$DD" "$MM" "$YYYY")

or export the variable from shell and access it via the ENV hash

export db=ENDP1
perl -p -i -e 's!DSMI_LOG.+$!DSMI_LOG           /oracle/$ENV{db}/admin/tdpoerror_$ENV{db}!' ${db}/admin/tdpo.opt

Return epoch seconds in Perl

perl -e 'print time();'

or to convert a specific day of the year to epoch seconds

use POSIX;
my $epochsecs = mktime(0,0,0, $jday, 0, $year-1900);

or for a specific date (careful though - month is 0-11 not 1-12!)…

use Time::Local;
#                          SS MM HH DD MM YYYY
my $epochsecs = timelocal (30,10,19,03,04,2014);

or more long-winded but delimited by anything… Supply the date/time in any delimited format. Eg. YYYY-MM-DD:HH:MI

#!/usr/bin/perl -w
use Time::Local;

# Convert a delimited date/time string (e.g. "YYYY-MM-DD:HH:MI:SS") to epoch
# seconds via timelocal().  Any run of non-word characters may delimit the
# fields; hour/minute/second are optional and default to 0.
# Returns -1 if the string does not look like a date.
sub date2epoch { 
  my($s) = @_;
  my($year, $month, $day, $hour, $minute, $second);

  # Up to 4 digits of year, then 1-2 digits each for month/day and 0-2 digits
  # for hour/minute/second; the 0* terms strip leading zeros so the captures
  # are plain decimal numbers (avoids octal surprises).
  if($s =~ m{^\s*(\d{1,4})\W*0*(\d{1,2})\W*0*(\d{1,2})\W*0*
                 (\d{0,2})\W*0*(\d{0,2})\W*0*(\d{0,2})}x) {
    $year = $1;  $month = $2;   $day = $3;
    $hour = $4;  $minute = $5;  $second = $6;
    $hour |= 0;  $minute |= 0;  $second |= 0;  # defaults.
    # Two-digit years: 00-69 => 2000s, 70-99 => 1900s.
    $year = ($year<100 ? ($year<70 ? 2000+$year : 1900+$year) : $year);
    # timelocal() expects a 0-based month, hence $month-1.
    return timelocal($second,$minute,$hour,$day,$month-1,$year);  
  }
  return -1;
}

Return epoch seconds in DOS/VBS/Windows

Paste this code into epoch.vbs

' Convert a VBScript date to Unix epoch seconds (seconds since 01/01/1970 00:00:00).
' NOTE(review): DateDiff works on the local clock here - no UTC conversion; confirm acceptable.
function date2epoch(p_date)
    date2epoch = DateDiff("s", "01/01/1970 00:00:00", p_date)
end function

Wscript.Echo date2epoch(Now())

and call from a DOS box like this

cscript //nologo epoch.vbs

Return date from epoch seconds in Perl

perl -e 'print scalar (localtime (1243269270))'

Return epoch seconds from julian day number in Perl

use POSIX;
my ($year, $jday) = (2012,60);
my $epochsecs = mktime(0,0,0, $jday, 0, $year-1900);

Extract specific pieces of a date from an epoch timestamp in Perl

use POSIX;
# return localtime into an array
my @tm = localtime $epochsecs;

my $yyyymmdd = strftime "%Y%m%d", @tm;
print $yyyymmdd."\n";

Return epoch seconds in Shell (linux)

date -d "2014/04/03 18:34:30" "+%s"

Return date from epoch seconds in Shell (linux)

date -d @1396542870

Return epoch seconds in PL/SQL

You cannot just use sysdate as that is timezone sensitive… (epoch seconds are in UTC/GMT)

select round((cast(sys_extract_utc(systimestamp) as date) - to_date('01-JAN-1970 00:00:00','DD-MON-YYYY HH24:MI:SS'))*24*60*60) from dual;
or
select round((vd.created - to_date('01-JAN-1970 00:00:00','DD-MON-YYYY HH24:MI:SS'))*24*60*60)  created from v$database vd;

How long has this instance been up (in minutes)?

select round((cast(sys_extract_utc(systimestamp) as date) - startup_time)*24*60) up_mins from v$instance;

Return date from epoch seconds in SQL

select to_char(to_date('19700101','YYYYMMDD') + ((&epoch)/24/60/60),'DD-MON-YYYY HH24:MI:SS') utc_date from dual;

A database overview using the sys.plato package

 exec plato.help;

or

 exec plato.complete('HTM');

List datafiles for a tablespace

select *
from   dba_data_files
where  1=1
and    tablespace_name ='&ts_name';

Show graph of available and free space in tablespaces

-- =============================================================================
-- File Name    : http://www.oracle-base.com/dba/monitoring/ts_free_space.sql
-- Author       : Tim Hall
-- Description  : Displays a list of tablespaces and their used/full status.
-- Requirements : Access to the DBA views.
-- Call Syntax  : @ts_free_space.sql
-- Last Modified: 13-OCT-2012 - Created. Based on ts_full.sql
--
-- Modification History
-- ====================
-- When      Who               What
-- ========= ================= =================================================
-- 13-NOV-13 Stuart Barkley    Added cols free_pct and used_pct
-- =============================================================================
SET PAGESIZE 140 lines 180
COLUMN used_pct FORMAT A11
COLUMN max_used_pct FORMAT A11

    SELECT tablespace_name                                                                      tablespace_name
    ,      size_mb                                                                              size_mb
    ,      free_mb                                                                              free_mb
    ,      TRUNC((free_mb/size_mb) * 100)                                                       free_pct
    ,      RPAD(' '|| RPAD('X',ROUND((size_mb-free_mb)/size_mb*10,0), 'X'),11,'-')              used_pct
    ,      max_size_mb                                                                          max_size_mb
    ,      max_free_mb                                                                          max_free_mb
    ,      TRUNC((max_free_mb/max_size_mb) * 100)                                               max_free_pct
    ,      RPAD(' '|| RPAD('X',ROUND((max_size_mb-max_free_mb)/max_size_mb*10,0), 'X'),11,'-')  max_used_pct
    from   (
           select a.tablespace_name                                         tablespace_name
           ,      b.size_mb                                                 size_mb
           ,      a.free_mb                                                 free_mb
           ,      b.max_size_mb                                             max_size_mb
           ,      a.free_mb + (b.max_size_mb - b.size_mb)                   max_free_mb
           from   (
                  select tablespace_name                                    tablespace_name
                  ,      trunc(sum(bytes)/1024/1024)                        free_mb
                  from   dba_free_space
                  group  by tablespace_name
                  ) a
           ,      (
                  select tablespace_name                                    tablespace_name
                  ,        trunc(sum(bytes)/1024/1024)                      size_mb
                  ,        trunc(sum(greatest(bytes,maxbytes))/1024/1024)   max_size_mb
                  from   dba_data_files
                  group by tablespace_name
                  ) b
           where  a.tablespace_name = b.tablespace_name
           )
    order  by tablespace_name;

set lines 80

Another method for calculating free space in tablespace using segments

select df.tablespace_name "Tablespace"
,      totalusedspace "Used MB"
,      (df.totalspace - tu.totalusedspace) "Free MB"
,      df.totalspace "Total MB"
,      round(100 * ( (df.totalspace - tu.totalusedspace)/ df.totalspace)) "Pct. Free"
from   (
       select tablespace_name
,      round(sum(bytes) / 1048576) totalspace
       from   dba_data_files
       group  by tablespace_name
       ) df
,      (
       select round(sum(bytes)/(1024*1024)) totalusedspace
       ,      tablespace_name
       from   dba_segments
       group  by tablespace_name
       ) tu
where  df.tablespace_name = tu.tablespace_name;

Work out maximum possible size of a datafile

select round( (4*1024*1024*value) / (1024*1024*1024) ,2) maxfilesizeingigabytes from v$parameter where name='db_block_size';

or, as tablespaces can have block sizes differing from that of the init.ora parameter…

select round( (4*1024*1024*block_size) / (1024*1024*1024) ,2) maxfilesizeingigabytes from dba_tablespaces where tablespace_name='&tablespace_name';

Show size and maxsize at datafile level

datafiles

set pages 100 lines 200
col file_name for a60
select sum(bytes)/1024/1024      mbytes_alloc
,      sum(maxbytes)/1024/1024   mbytes_max
,      file_name
from   sys.dba_data_files
group  by file_name
/

tempfiles

set pages 100 lines 200
col file_name for a60
select sum(bytes)/1024/1024      mbytes_alloc
,      sum(maxbytes)/1024/1024   mbytes_max
,      file_name
from   sys.dba_temp_files
group  by file_name
/

Resize the online redo logfiles

Found a more concise, smarter way of doing it here - oracle-wiki.net Switching logs too often? Redolog files too small?

set lines 1000
col group_member for a60
select l.group#          group_number
,      (bytes/1024/1024) megs
,      l.status          group_status
,      f.member          group_member
,      l.archived        archived
,      f.type            file_type
from   v$log     l
,      v$logfile f
where  l.group# = f.group#
order  by l.group#
,      f.member
/
define SID=TIBQFW
alter database add logfile group 6  '/oracle/&SID/oradata1/redo1.log' size 1G;
alter database add logfile group 7  '/oracle/&SID/oradata2/redo2.log' size 1G;
alter database add logfile group 8  '/oracle/&SID/oradata3/redo3.log' size 1G;
alter database add logfile group 9  '/oracle/&SID/oradata1/redo4.log' size 1G;
alter database add logfile group 10 '/oracle/&SID/oradata2/redo5.log' size 1G;

or if you use multiple members…

alter database add logfile group 6 ('/oracle/&SID/oradata1/redo/redo01a.log', '/oracle/&SID/oradata1/redo01b.log') size 50M;
alter database add logfile group 7 ('/oracle/&SID/oradata1/redo/redo02a.log', '/oracle/&SID/oradata1/redo02b.log') size 50M;
alter database add logfile group 8 ('/oracle/&SID/oradata1/redo/redo03a.log', '/oracle/&SID/oradata1/redo03b.log') size 50M;
alter system switch logfile;

If the old logs are not yet inactive, checkpoint the database

alter system checkpoint;
alter database drop logfile group 1;
alter database drop logfile group 2;
alter database drop logfile group 3;
alter database drop logfile group 4;
alter database drop logfile group 5;

Increase the existing size of a datafile

alter database datafile '&full_path_of_datafile' resize &new_meg_size.M;

Increase the max size of a tempfile

alter database tempfile '/oracle/ENDP1/oradata90/temp01.dbf' autoextend on maxsize unlimited;

Find out what processes are using the temp tablespace

Temporary tablespace is used for sorting query results. Find them with this.

SELECT b.tablespace,b.segfile#,b.segblk#,b.blocks,a.sid,a.serial#,
a.username,a.osuser, a.status
FROM v$session a,v$sort_usage b
WHERE a.saddr = b.session_addr;

Increase the maximum size of a datafile

Although this can be set lower than existing size, try not to. It makes the reports look weird!

alter database
      datafile '&full_path_of_datafile'
      autoextend on
      maxsize &new_meg_size.m;

Find schemas (users) with objects created outside their default tablespace

Indexes could be a notable exception if they are created in an index tablespace

set lines 200
col owner           for a20
col segment_name    for a45
col tablespace_name for a32
select owner
,      segment_name
,      tablespace_name
from   dba_segments
where  exists (select username from dba_users where username = owner)
and    tablespace_name != (select default_tablespace from dba_users where username = owner)
and    owner = '&owner'
/

Add a datafile to a tablespace

'unlimited' will default to the maximum datafile size for the current db_block_size

alter tablespace &tablespace_name
      add datafile '&full_path_of_datafile'
      size 100M
      autoextend on
      next 100M
      maxsize unlimited;

List all datafiles with their size

set pages 1000 lines 2000
col filetype for a15
col name for a60
break on report
compute sum of gig on report

select filetype
,      name
,      gig
from   (
       select 'datafile' filetype
       ,      name
       ,      bytes/1024/1024 gig
       from   v$datafile
       union all
       select 'tempfile' filetype
       ,      name
       ,      bytes/1024/1024
       from   v$tempfile
       union all
       select 'logfile' filetype
       ,      lf.member "name"
       ,      l.bytes/1024/1024
       from   v$logfile lf
       ,      v$log     l
       where  1=1
       and    lf.group# = l.group#
       ) used
/

Database size (How big is my database?)

There are different interpretations of what constitutes the size of a database… Is it the space allocated to the datafiles or only the space taken up by the data? Are tempfiles included? Are redo logs included? etc…

set lines 132
col dbname for a9  heading "Database"
col dbsize for a15 heading "Size"
col dbused for a15 heading "Used space"
col dbfree for a15 heading "Free space"
with used_space as
(
select  bytes
from    v$datafile
union   all
select  bytes
from    v$tempfile
union   all
select  bytes
from    v$log
)
, free_space as
(
select sum(bytes) sum_bytes
from dba_free_space
)
select vd.name                                                                                            dbname
,      round(sum(used_space.bytes)/1024/1024/1024 ) || ' GB'                                              dbsize
,      round(sum(used_space.bytes)/1024/1024/1024 ) - round(free_space.sum_bytes/1024/1024/1024) || ' GB' dbused
,      round(free_space.sum_bytes/1024/1024/1024) || ' GB'                                                dbfree
from   free_space
,      used_space
,      v$database vd
group  by vd.name
,      free_space.sum_bytes
/

Drop datafile from temp tablespace

Database is completely stuck because the temporary tablespace has grown to fill all available room. Look around for at least some log files to delete in order to get connected (audit files, sqlnet logs, listener logs) Once connected, change the default temporary tablespace to a small one so we can remove the original.

create temporary tablespace temp2 tempfile '/oracle/RMANV12/oradata2/temp2.dbf' size 5M;
alter database default temporary tablespace temp2;

I found trying to drop the datafile like this didn't work, just hung there (probably active transactions)…

drop tablespace temp including contents and datafiles;

So did it like this…

alter database tempfile '/oracle/RMANV12/oradata2/temp.dbf' drop including datafiles;

Database altered.

drop tablespace temp including contents and datafiles;

Tablespace dropped.

… an alternative would be to find the sessions using the temp space and kill them…

select sid, serial#, status from v$session where serial# in (select session_num from v$sort_usage);

Then to be neat and tidy, rename the temp2 tablespace back to temp.

create temporary tablespace temp tempfile '/oracle/RMANV12/oradata2/temp.dbf' size 256M reuse autoextend on next 128M maxsize 1G;
alter database default temporary tablespace temp;
drop tablespace temp2 including contents and datafiles;

Although the last statement will not work until any transactions using it have been rolled back or committed.

List files that are in hot-backup mode

 set lines 100
 set pages 100
 col name format a60
 select df.name
 ,      b.status
 ,      to_char(time, 'hh24:mi:ss dd/mm/yyyy') time
 from   v$datafile   df
 ,      v$backup     b
 where  1=1
 and    df.file# = b.file#
 and    b.status = 'ACTIVE'
 order  by b.file#;

Waiting for snapshot control file enqueue

Script to identify sessions causing RMAN to hang because of 'waiting for snapshot control file enqueue' message

set lines 2000
col killer     for a70
col program    for a20
col module     for a20
col action     for a20
col logon_time for a20

select 'alter system disconnect session '''||s.sid||','||s.serial#||''' immediate;' killer
,      username
,      program
,      module
,      action
,      logon_time
from   v$session s
,      v$enqueue_lock l
where  l.sid  = s.sid
and    l.type = 'CF'
and    l.id1  = 0
and    l.id2  = 2
/

Info on blocking processes

select s1.username || '@' || s1.machine
       || ' ( SID=' || s1.sid || ' )  is blocking '
       || s2.username || '@' || s2.machine || ' ( SID=' || s2.sid || ' ) '  blocking_status
from   v$lock l1
,      v$session s1
,      v$lock l2
,      v$session s2
where  s1.sid     = l1.sid
and    s2.sid     = l2.sid
and    l1.block   = 1
and    l2.request > 0
and    l1.id1     = l2.id1
and    l1.id2     = l2.id2
/

session doing the blocking

select *
from  v$lock l1
where 1=1
and   block = 1 ;

sessions being blocked

select *
from   v$lock l1
where  1=1
and    id2 = 85203;

info on session doing the blocking

select row_wait_obj#, row_wait_file#, row_wait_block#, row_wait_row#
from   v$session
where  sid = 234;

select do.object_name
,      row_wait_obj#
,      row_wait_file#
,      row_wait_block#
,      row_wait_row#
,      dbms_rowid.rowid_create ( 1, ROW_WAIT_OBJ#, ROW_WAIT_FILE#, ROW_WAIT_BLOCK#, ROW_WAIT_ROW# ) row_id
from   v$session   s
,      dba_objects do
where  sid=234
and    s.ROW_WAIT_OBJ# = do.OBJECT_ID;

Show locked objects

 set lines 100
 set pages 999
 col username   format a20
 col sess_id    format a10
 col object     format a25
 col mode_held  format a10
 select oracle_username||' ('||s.osuser||')' username
 ,      s.sid||','||s.serial# sess_id
 ,      owner||'.'||object_name object
 ,      object_type
 ,      decode ( l.block
               , 0, 'Not Blocking'
               , 1, 'Blocking'
               , 2, 'Global'
               ) status
 ,      decode ( v.locked_mode
               , 0, 'None'
               , 1, 'Null'
               , 2, 'Row-S (SS)'
               , 3, 'Row-X (SX)'
               , 4, 'Share'
               , 5, 'S/Row-X (SSX)'
               , 6, 'Exclusive', TO_CHAR(lmode)
               ) mode_held
 from   v$locked_object v
 ,      dba_objects d
 ,      v$lock l
 ,      v$session s
 where  1=1
 and    v.object_id = d.object_id
 and    v.object_id = l.id1
 and    v.session_id = s.sid
 order  by oracle_username
 ,      session_id;

Show which row is locked

 select do.object_name
 ,      row_wait_obj#
 ,      row_wait_file#
 ,      row_wait_block#
 ,      row_wait_row#
 ,      dbms_rowid.rowid_create (1, ROW_WAIT_OBJ#, ROW_WAIT_FILE#, ROW_WAIT_BLOCK#, ROW_WAIT_ROW#)
 from   v$session   s
 ,      dba_objects do
 where  1=1
 and    s.ROW_WAIT_OBJ# = do.OBJECT_ID
 and    sid             = &sid;

Check what is audited on a database

set pagesize 100
set linesize 256
select audit_option,success,failure from dba_stmt_audit_opts;

How old is the oldest audit record?

select extract (day from (systimestamp - min(ntimestamp#))) days_old from aud$;

Check what objects are being audited

tti col 60 'Object Audit Options (dba_obj_audit_opts)' skip col 50 'A=By Access, S=By Session. Left=on success, right=on failure' skip 2
set lines 1000 pages 100
col alt for a3 head "Alt|er"
col aud for a3 head "Aud|it"
col com for a3 head "Com|ent"
col del for a3 head "Del|ete"
col gra for a3 head "Gra|nt"
col ind for a3 head "Ind|ex"
col ins for a3 head "Ins|ert"
col loc for a3 head "Lo|ck"
col ren for a3 head "Ren|ame"
col sel for a3 head "Sel|ect"
col upd for a3 head "Upd|ate"
col ref for a3 head "Ref|rnc"
col exe for a3 head "Exe|cut"
col cre for a3 head "Cre|ate"
col rea for a3 head "Re|ad"
col wri for a3 head "Wri|te"
col fbk for a3 head "Fla|sh"
select *
from   dba_obj_audit_opts
/

Procedure to delete old audit records

select text from all_source where lower(name)='purge_audit_trail';

TEXT
------------------------------------------------------------------------------------------------
PROCEDURE purge_audit_trail IS
BEGIN
-- set the last archive timestamp
DBMS_AUDIT_MGMT.SET_LAST_ARCHIVE_TIMESTAMP(
       audit_trail_type    => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD,
       last_archive_time   => systimestamp - interval '365' day,
       rac_instance_number => null
                                          );
-- purge audit trail till last archive timestamp
DBMS_AUDIT_MGMT.CLEAN_AUDIT_TRAIL( audit_trail_type => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD
, use_last_arch_timestamp => TRUE);

END purge_audit_trail;

Job setup to run the audit purge

BEGIN
-- Create a daily 20:00 job that runs the SYS.PURGE_AUDIT_TRAIL procedure.
-- The job is created disabled (enabled => FALSE) so its attributes can be
-- adjusted below before the final explicit enable() call.
sys.dbms_scheduler.create_job(
    job_name => '"SYS"."PURGE_AUDIT_TRAIL_JOB"',
    job_type => 'STORED_PROCEDURE',
    job_action => '"SYS"."PURGE_AUDIT_TRAIL"',
    repeat_interval => 'FREQ=DAILY;BYHOUR=20;BYMINUTE=0;BYSECOND=0',
    start_date => to_timestamp_tz('2012-02-22 Europe/Paris', 'YYYY-MM-DD TZR'),
    job_class => '"DEFAULT_JOB_CLASS"',
    comments => 'purge audit trails until last archive timestamp',
    auto_drop => FALSE,   -- keep the job definition after runs
    enabled => FALSE
);
-- Turn off scheduler run logging and set the job's weight, then enable it.
sys.dbms_scheduler.set_attribute( name => '"SYS"."PURGE_AUDIT_TRAIL_JOB"', attribute => 'logging_level', value => DBMS_SCHEDULER.LOGGING_OFF);
sys.dbms_scheduler.set_attribute( name => '"SYS"."PURGE_AUDIT_TRAIL_JOB"', attribute => 'job_weight', value => 1);
sys.dbms_scheduler.enable( '"SYS"."PURGE_AUDIT_TRAIL_JOB"' );
END;
/

A list of all SIDs in oratab that should restart after a server boot

Using awk

awk -F: '$NF=="Y" && $1 !~/[#]/ {print $1}'  /etc/oratab

Using perl

perl -F: -ne 'print "$1\n" if /(\w+):(.+):Y$/' /etc/oratab

Move the admin directories to a new location

./all_db_do "alter system set audit_file_dest='/oracle/&SID/admin/adump' scope=spfile;"

./all_db_do "shutdown immediate"

for db in `awk -F: 'NF && $1 !~/[#*]/ {print $1}'  /etc/oratab`
do
    echo $db
    mkdir -p /oracle/${db}/admin
    echo "/oracle/${db}/admin created"
    mv /oracle/admin/${db}/adump /oracle/${db}/admin/
    mv /oracle/admin/${db}/dpdump /oracle/${db}/admin/
    echo "Moved /oracle/admin/${db}/adump to /oracle/${db}/admin/"
done

./all_db_do startup

./all_db_do "create pfile from spfile;"

Check if the Standby database is up-to-date with the primary database

 select arch.thread# "Thread"
 ,      arch.sequence# "Last Sequence Received"
 ,      appl.sequence# "Last Sequence Applied"
 ,      (arch.sequence# - appl.sequence#) "Difference"
 from   (
        select thread#
        ,      sequence#
        from   v$archived_log
        where  1=1
        and    ( thread#,first_time ) in (
                                         select thread#, max(first_time)
                                         from   v$archived_log
                                         group  by thread#
                                         )
        ) arch
        ,
        (
        select thread#
        ,      sequence#
        from   v$log_history
        where  1=1
        and    ( thread#,first_time ) in ( select thread# ,max(first_time)
                                           from   v$log_history
                                           group  by thread#
                                          )
        ) appl
 where  1=1
 and    arch.thread# = appl.thread#
 order  by 1;

Check for gaps on the Standby database

Run this on the standby

select thread#, low_sequence#, high_sequence# from v$archive_gap;

or run this on the Primary to see the latest generated archive…

select thread#   "Thread"
,      sequence# "Last sequence generated"
from   v$archived_log
where  (thread#, first_time) in ( select thread#, max(first_time)
                                  from   v$archived_log
                                  group  by thread#
                                )
order  by 1
/

and run this on the standby to check for received but not yet applied logs (this could be normal if a lag has been set)…

select arch.thread# "Thread"
,      arch.sequence# "Last sequence received"
,      appl.sequence# "Last sequence applied"
,      (arch.sequence#-appl.sequence#) "Difference"
from   (
       select thread#
       ,      sequence#
       from   v$archived_log
       where  (thread#,first_time ) in (select thread#, max(first_time)
                                        from   v$archived_log
                                        group  by thread#
                                       )
       ) arch
,      (
       select thread#
       ,      sequence#
       from   v$log_history
       where  (thread#,first_time ) in (select thread#, max(first_time)
                                        from   v$log_history
                                        group  by thread#
                                       )
      ) appl
where arch.thread# = appl.thread#
order by 1
/

Compare the results from Primary and Secondary to detect possible network problems or other errors in shipping.

Ignore case-sensitive passwords in 11g (deprecated in 12c)

alter system set SEC_CASE_SENSITIVE_LOGON = false scope=both;

External table for the Alert log (pre 11g)

Modified to use (the better) scripts found on Rene Nyffenegger's site This script creates the custom alertlog table
Ensure the owning schema has the following privileges:
create any directory drop any directory

-- ================
-- to be run as sys
-- ================

-- ---------------------------------------------------
-- create an external table based on the text alertlog
-- ---------------------------------------------------
column db    new_value _db    noprint;
column bdump new_value _bdump noprint;

select instance_name db from v$instance;

select value bdump
from   v$parameter
where  name = 'background_dump_dest';

drop   directory bdump;
create directory bdump as '&&_bdump';

grant read,write on directory bdump to public;

drop   table monuser.alert_log_disk;
create table monuser.alert_log_disk ( text varchar2(2000) )
organization external (
type oracle_loader
default directory bdump
access parameters ( records delimited by newline nologfile nobadfile
                    fields terminated by "&" ltrim
                  )
location('alert_&&_db..log')
)
reject limit unlimited;


-- ------------------------------
-- create a custom alertlog table
-- ------------------------------
-- persistent copy of the alert log: one row per message, stamped with the
-- timestamp taken from the preceding date line by the loader block below
drop table monuser.alert_log;
create table monuser.alert_log (
    indx         number
,   message_date date
,   message_text varchar2(2000)
)
storage (initial 512k next 512K pctincrease 0);

create index monuser.alert_log_u1 on monuser.alert_log(indx)
storage (initial 512k next 512K pctincrease 0);

create index monuser.alert_log_u2 on monuser.alert_log(message_date)
storage (initial 512k next 512K pctincrease 0);

-- sequence feeding alert_log.indx
drop   sequence monuser.alert_log_s1;
create sequence monuser.alert_log_s1 minvalue 1 increment by 1;


-- -----------------------------------------------------------------
-- make this custom table appear the same as the more modern version
-- -----------------------------------------------------------------
-- expose the custom table under the 11g+ fixed-table name x$dbgalertext so
-- tooling written against the modern view works unchanged
drop   view monuser.x$dbgalertext;
create view monuser.x$dbgalertext as
select indx
,      message_date
,      message_text
from   monuser.alert_log;

drop public synonym x$dbgalertext;
create public synonym x$dbgalertext for monuser.x$dbgalertext;

Now, after the two tables are created, the alert_log table can be filled with the following script.
It only loads those records that are greater than the last time it loaded. And it loads the date/time on every line for convenience.
It also helps when the alertlogs get rotated. You still keep the history within an Oracle table.
Finally, it also strips out all the crap that is really not needed to see if you are looking for errors.

set serveroutput on
alter session set current_schema=monuser;

-- Load new alert.log lines (read via the external table alert_log_disk) into
-- the persistent alert_log table, stamping each message with the timestamp of
-- the most recent date line, and skipping known noise messages.
declare

  isdate         number := 0;   -- 1 when the current line is a timestamp line
  start_updating number := 0;   -- set to 1 once we pass the last loaded date
  rows_inserted  number := 0;

  message_date   date;          -- carried forward from the last timestamp line
  max_date       date;          -- newest date already present in alert_log

  message_text  alert_log_disk.text%type;

begin

    dbms_output.enable(null);

    /* find a starting date */
    select max(message_date) into max_date from alert_log;

    if (max_date is null) then
        max_date := to_date('01-jan-1980', 'dd-mon-yyyy');
    end if;

    /* read the alertlog from disk, filtering out the noise lines */
    for r in (
        select substr(text,1,2000) message_text
        from   alert_log_disk
        where  text not like '%offlining%'
        and    text not like 'ARC_:%'
        and    text not like '%LOG_ARCHIVE_DEST_1%'
        and    text not like '%Thread 1 advanced to log sequence%'
        and    text not like '%Current log#%seq#%mem#%'
        and    text not like '%Undo Segment%lined%'
        and    text not like '%alter tablespace%back%'
        and    text not like '%Log actively being archived by another process%'
        and    text not like '%alter database backup controlfile to trace%'
        and    text not like '%Created Undo Segment%'
        and    text not like '%started with pid%'
        and    text not like '%ORA-12012%'
        and    text not like '%ORA-06512%'
        and    text not like '%ORA-000060:%'
        and    text not like '%coalesce%'
        and    text not like '%Beginning log switch checkpoint up to RBA%'
        and    text not like '%Completed checkpoint up to RBA%'
        and    text not like '%specifies an obsolete parameter%'
        and    text not like '%BEGIN BACKUP%'
        and    text not like '%END BACKUP%'
    )
    loop

        isdate       := 0;
        message_text := null;

        /* a timestamp line (e.g. "Mon Jan 01 00:00:00 2020") carries its
           4-digit year at position 21; match ANY plausible year instead of
           the hard-coded list ('2019'..'2023') the original used, which
           silently stopped loading anything dated after 2023 */
        select count(*)
        into   isdate
        from   dual
        where  regexp_like(substr(r.message_text, 21), '^(19|20)[0-9]{2}$')
        and    r.message_text not like '%cycle_run_year%';

        if (isdate = 1) then
            select to_date(substr(r.message_text, 5),'Mon dd hh24:mi:ss rrrr')
            into message_date
            from dual;
            if (message_date > max_date) then
                start_updating := 1;
            end if;
        else
            message_text := r.message_text;
        end if;

        /* only insert message lines newer than the last load */
        if (message_text is not null) and (start_updating = 1) then
            insert into alert_log (indx, message_date, message_text) values (alert_log_s1.nextval, message_date, message_text);
            rows_inserted := rows_inserted+1;
        end if;

    end loop;

    sys.dbms_output.put_line('Inserting after: '||to_char(max_date, 'DD-MON-RR HH24:MI:SS'));
    sys.dbms_output.put_line('Rows Inserted:   '||rows_inserted);

    commit;

end;
/

Update the alert_log table periodically from crontab with the alerts generated since the last run

#!/usr/bin/ksh
# Cron wrapper: run the custom alert-log loader against the 'fin' instance.

# export ORAENV_ASK so oraenv and anything it spawns can see it -- the
# original left it unexported, relying on oraenv being sourced
export ORAENV_ASK=NO
export ORACLE_SID=fin
export PATH="/usr/local/bin:${PATH}"
. oraenv

sqlplus / as sysdba<<EOSQL
start /oracle/scripts/update_custom_alertlog
EOSQL

Alert log (11g+)

This reads the log.xml file (which contains the same information as alert.log) Show the messages put in the alert log in the last 24 hours

-- Summarise ORA-/error messages from the alert log over a recent window.
select substr(MESSAGE_TEXT, 1, 300) message_text
,      count(*) cnt
from   X$DBGALERTEXT
where  1=1
and    (MESSAGE_TEXT like '%ORA-%' or upper(MESSAGE_TEXT) like '%ERROR%')
-- NOTE(review): &days_ago is divided by 1440 (minutes per day), so the value
-- supplied is effectively a number of MINUTES ago, not days -- confirm the
-- intent or rename the substitution variable
and    cast(ORIGINATING_TIMESTAMP as DATE) > sysdate - &days_ago/1440
group  by substr(MESSAGE_TEXT, 1, 300)
/

There is also a fixed table X$DBGDIREXT which returns all file and directory names under diagnostic_dest/diag directory:

-- lpad indents each entry by its depth (lvl) to give a tree-like listing
select lpad(' ',lvl,' ')||logical_file file_name
from   x$dbgdirext
where  rownum <=20;

Directory paths available in v$diag_info

set lines 2000
col name for a35
col value for a100 wrap
-- one row per diagnostic location (ADR base/home, trace, alert, incident, ...)
select * from v$diag_info;

   INST_ID NAME                                VALUE                                                                                                    CON_ID
---------- ----------------------------------- ---------------------------------------------------------------------------------------------------- ----------
         1 Diag Enabled                        TRUE                                                                                                       0
         1 ADR Base                            /cln/prg/ora_bin2/app/oracle                                                                               0
         1 ADR Home                            /cln/prg/ora_bin2/app/oracle/diag/rdbms/bild/bild                                                          0
         1 Diag Trace                          /cln/prg/ora_bin2/app/oracle/diag/rdbms/bild/bild/trace                                                    0
         1 Diag Alert                          /cln/prg/ora_bin2/app/oracle/diag/rdbms/bild/bild/alert                                                    0
         1 Diag Incident                       /cln/prg/ora_bin2/app/oracle/diag/rdbms/bild/bild/incident                                                 0
         1 Diag Cdump                          /cln/prg/ora_bin2/app/oracle/diag/rdbms/bild/bild/cdump                                                    0
         1 Health Monitor                      /cln/prg/ora_bin2/app/oracle/diag/rdbms/bild/bild/hm                                                       0
         1 Default Trace File                  /cln/prg/ora_bin2/app/oracle/diag/rdbms/bild/bild/trace/bild_ora_44696768.trc                              0
         1 Active Problem Count                0                                                                                                          0
         1 Active Incident Count               0                                                                                                          0

11 rows selected.

Find out the names of the active trace files for the running processes

-- the tracefile column (11g+) gives the exact trace file for each server process
select pid, program, tracefile from v$process;

Request was to duplicate a large tablespace from production to test environment

…but rather than copy the terabytes of data, create the destination tablespace with just the most recent months data

See what the source data looks like

col table_owner    for a20
col table_name     for a30
col partition_name for a20
set pages 100 lines 1000

-- list every table partition stored in the ADS_ARCHIVE tablespace
select table_owner
,      table_name
,      partition_name
,      partition_position
,      tablespace_name
from   dba_tab_partitions
where  tablespace_name = 'ADS_ARCHIVE'
order  by 1,2,3,4
/

This query produces something like this…

TABLE_OWNER          TABLE_NAME                     PARTITION_NAME       PARTITION_POSITION TABLESPACE_NAME
-------------------- ------------------------------ -------------------- ------------------ ------------------------------
ADS_ARCHIVE          BIL_DAY_INV_CTR_ARC            Y2017_Q1_M01_D01                      1 ADS_ARCHIVE
ADS_ARCHIVE          BIL_HIS_DETAIL_FAC_ARC         Y2016_Q2_M05_D04                      1 ADS_ARCHIVE
ADS_ARCHIVE          BIL_HIS_DETAIL_FAC_ARC         Y2016_Q2_M05_D09                      2 ADS_ARCHIVE
ADS_ARCHIVE          BIL_MTH_INV_CTR_ARC            Y2017_Q1_M01_D01                      1 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_CBA_BGAAP_ARC          Y2011_Q4_M12_D30                      1 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_CBA_BGAAP_ARC          Y2011_Q4_M12_D31                      2 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_CBA_IFRS_ARC           Y2011_Q4_M12_D30                      1 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_CBA_IFRS_ARC           Y2011_Q4_M12_D31                      2 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_MVT_BGAAP_ARC          Y2011_Q4_M12_D30                      1 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_MVT_BGAAP_ARC          Y2011_Q4_M12_D31                      2 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_MVT_BGAAP_ARC          Y2012_Q1_M03_D31                      3 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_MVT_BGAAP_ARC          Y2012_Q2_M04_D01                      4 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_MVT_BGAAP_ARC          Y2012_Q4_M10_D12                      5 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_MVT_BGAAP_ARC          Y2012_Q4_M10_D13                      6 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_MVT_BGAAP_ARC          Y2012_Q4_M10_D14                      7 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_MVT_BGAAP_ARC_TST      Y2011_Q4_M12_D30                      1 ADS_ARCHIVE
ADS_ARCHIVE          FAH_DAY_MVT_BGAAP_ARC_TST      Y2011_Q4_M12_D31                      2 ADS_ARCHIVE
...
ADS_ARCHIVE          SBP_HIS_LINES_ARC              P_LS                                  3 ADS_ARCHIVE
ADS_ARCHIVE          THA_DAY_INV_CTR_ARC            Y2011_Q4_M12_D30                      1 ADS_ARCHIVE
ADS_ARCHIVE          THA_DAY_INV_CTR_ARC            Y2011_Q4_M12_D31                      2 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_HEAVY_ARC              Y2011_Q4_M12_D30                      1 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_HEAVY_ARC              Y2011_Q4_M12_D31                      2 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2011_Q4_M12_D30                      1 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D12                      2 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D13                      3 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D15                      4 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D16                      5 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D17                      6 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D18                      7 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D19                      8 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D20                      9 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D21                     10 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D22                     11 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D23                     12 ADS_ARCHIVE
ADS_ARCHIVE          THA_INV_LIGHT_ARC              Y2012_Q4_M10_D24                     13 ADS_ARCHIVE
ADS_ARCHIVE          THA_MTH_INV_CTR_ARC            Y2011_Q4_M12_D30                      1 ADS_ARCHIVE
ADS_ARCHIVE          THA_MTH_INV_CTR_ARC            Y2011_Q4_M12_D31                      2 ADS_ARCHIVE
ADS_ARCHIVE          THA_MTH_INV_CTR_ARC            Y2013_Q2_M04_D30                      3 ADS_ARCHIVE
ADS_ARCHIVE          XBD01_TEMP_HIS_ARC             Y2012_Q4_M12_D31                      1 ADS_ARCHIVE
ADS_ARCHIVE          T_RPT_RECO_THA_INV_ACC_ARC     Y2011_Q4_M12_D30                      1 ADS_ARCHIVE
ADS_ARCHIVE          T_RPT_RECO_THA_INV_ACC_ARC     Y2011_Q4_M12_D31                      2 ADS_ARCHIVE
ADS_ARCHIVE          T_RPT_RECO_THA_INV_MTH_ARC     Y2011_Q4_M12_D30                      1 ADS_ARCHIVE
ADS_ARCHIVE          T_RPT_RECO_THA_INV_MTH_ARC     Y2011_Q4_M12_D31                      2 ADS_ARCHIVE
ADS_ARCHIVE          XBD01_TEMP_HIS_ARC             Y2012_Q4_M12_D31                      1 ADS_ARCHIVE
ADS_DBA              FAH_DAY_CBA_BGAAP              Y2016_Q3_M07_D12                   1657 ADS_ARCHIVE
ADS_DBA              FAH_DAY_CBA_BGAAP              Y2016_Q4_M12_D13                   1811 ADS_ARCHIVE
ADS_DBA              FAH_DAY_MVT_IFRS               Y2016_Q4_M10_D10                   1654 ADS_ARCHIVE

Capture the tablespace metadata

-- get_ddl returns a CLOB; 'set long' must be large enough to show all of it
set long 1000000
select dbms_metadata.get_ddl ( 'TABLESPACE', 'ADS_ARCHIVE') from dual
/

produces…

CREATE TABLESPACE "ADS_ARCHIVE" DATAFILE
  '/oracle/tst/ora_data3/adst/ads_archive01.dbf' SIZE 104857600
  AUTOEXTEND ON NEXT 104857600 MAXSIZE 32000M,
...
  '/oracle/tst/ora_data3/adst/ads_archive66.dbf' SIZE 104857600
  AUTOEXTEND ON NEXT 104857600 MAXSIZE 3200M,
  LOGGING ONLINE PERMANENT BLOCKSIZE 8192
  EXTENT MANAGEMENT LOCAL AUTOALLOCATE DEFAULT
 NOCOMPRESS  SEGMENT SPACE MANAGEMENT AUTO

Capture the tablespace objects' metadata

# Build the Data Pump parameter file: metadata only (no rows), statistics
# excluded so they can be regathered after import.
cat<<EOCAT >expdp_adst_ads_archive_metadata.par
userid='/ as sysdba'
dumpfile=adst_ads_archive_metadata.dmp
logfile=expdp_adst_ads_archive_metadata.log
tablespaces=ADS_ARCHIVE
parallel=20
content=metadata_only
exclude=statistics
EOCAT

expdp parfile=expdp_adst_ads_archive_metadata.par

Find the partitions with data from the most recent month

# Build a script that spools the recent-month partition list, then run it.
# BUG FIX: in the original the here-doc terminator (EOCAT) came AFTER the
# "echo | sqlplus" line, so the sqlplus invocation was written into the .sql
# file instead of being executed and the list was never produced.
cat <<EOCAT>/tmp/adst_ads_archive_partitions.sql
set termo off echo off lines 1000 feedb off headi off newpa none trims on
spool /tmp/adst_ads_archive_partitions.lst
select table_owner||'.'||table_name||':'||partition_name
from   dba_tab_partitions
where  tablespace_name = 'ADS_ARCHIVE'
and    partition_name like 'Y2017_Q1_M01%'
/
spool off
EOCAT

echo "@/tmp/adst_ads_archive_partitions.sql" | sqlplus / as sysdba

produces…

ADS_ARCHIVE.FAH_DAY_CBA_BGAAP_ARC:Y2017_Q1_M01_D07
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M02_D26
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M02_D25
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M02_D19
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M02_D18
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M02_D12
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M02_D11
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M02_D05
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M02_D26
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M02_D25
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M02_D19
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M02_D18
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M02_D12
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M02_D11
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M02_D05
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M02_D04
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M01_D29
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M01_D28
...
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M01_D22
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M01_D21
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M01_D15
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M01_D14
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M01_D08
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M01_D07
ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC:Y2017_Q1_M01_D01
ADS_ARCHIVE.BIL_MTH_INV_CTR_ARC:Y2017_Q1_M01_D01
ADS_ARCHIVE.BIL_DAY_INV_CTR_ARC:Y2017_Q1_M01_D01
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M02_D04
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M01_D29
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M01_D28
ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC:Y2017_Q1_M01_D22

Export the data in these partitions

# Build the export parameter file header.
cat<<EOCAT >expdp_adst_ads_archive_partitions.par
userid='/ as sysdba'
dumpfile=adst_ads_archive_Y2017_%U.dmp
logfile=expdp_adst_ads_archive_Y2017.log
EOCAT

# Prefix every partition in the spooled list with "tables=" and append it.
# (The original piped the file through "perl -p -i", but -i is meaningless on
# a pipe and only triggers a warning; a plain sed substitution does the job.)
sed -e 's/^/tables=/' /tmp/adst_ads_archive_partitions.lst >>expdp_adst_ads_archive_partitions.par

cat<<EOCAT >>expdp_adst_ads_archive_partitions.par
parallel=20
exclude=statistics
EOCAT

expdp parfile=expdp_adst_ads_archive_partitions.par

Drop the old tablespace (make sure you are on the destination database!)

-- CAUTION: destroys the tablespace and its datafiles -- run on the DESTINATION db only
drop tablespace ads_archive including contents and datafiles
/

Recreate the tablespace using the metadata captured above

-- cut-down version of the captured DDL: a handful of small autoextending
-- datafiles instead of the 66 full-size production ones
create tablespace "ADS_ARCHIVE"
datafile '/oracle/tst/ora_data3/adst/ads_archive01.dbf' size 100M autoextend on next 50M maxsize unlimited
,        '/oracle/tst/ora_data3/adst/ads_archive02.dbf' size 100M autoextend on next 50M maxsize unlimited
,        '/oracle/tst/ora_data3/adst/ads_archive03.dbf' size 100M autoextend on next 50M maxsize unlimited
,        '/oracle/tst/ora_data3/adst/ads_archive04.dbf' size 100M autoextend on next 50M maxsize unlimited
,        '/oracle/tst/ora_data3/adst/ads_archive05.dbf' size 100M autoextend on next 50M maxsize unlimited
/

Import the tablespace metadata

# Import the table/index definitions (no rows) captured earlier.
cat<<EOCAT >impdp_adst_ads_archive_metadata.par
userid='/ as sysdba'
dumpfile=adst_ads_archive_metadata.dmp
logfile=impdp_adst_ads_archive_metadata.log
EOCAT

impdp parfile=impdp_adst_ads_archive_metadata.par

-- after the metadata import, generate rebuild statements for any indexes
-- left in UNUSABLE state
select 'alter index '|| owner||'.'||index_name||' rebuild online parallel 8;'
from   dba_indexes
where  status = 'UNUSABLE'
/

Import the last month partition data

# table_exists_action=append: the tables were pre-created (empty) by the
# metadata import, so rows are appended into the existing partitions
cat<<EOCAT >impdp_adst_ads_archive_partitions.par
userid='/ as sysdba'
dumpfile=adst_ads_archive_Y2017_%U.dmp
logfile=impdp_adst_ads_archive_Y2017.log
table_exists_action=append
parallel=20
EOCAT

impdp parfile=impdp_adst_ads_archive_partitions.par

Write to a TCP/IP socket from PL/SQL

Like writing to a pipe to get data out to the shell from PL/SQL

-- Write one line to a TCP socket from PL/SQL (like piping data out to a
-- listener in the shell, e.g. "nc -lk 5000").
declare
 bt_conn               utl_tcp.connection;    -- TCP connection handle
 retval                binary_integer;        -- bytes written
 l_sequence            varchar2(30) := 'Sequence Number 101021';
begin
 bt_conn := utl_tcp.open_connection(remote_host => '10.10.1.1'
                                   ,remote_port => 5000
                                   ,tx_timeout => 1
                                   )
 ;
 retval := utl_tcp.write_line(bt_conn,l_sequence)
 ;
 utl_tcp.flush(bt_conn)
 ;
 utl_tcp.close_connection(bt_conn)
 ;
exception
 when others then
  -- BUG FIX: close the socket BEFORE re-raising -- in the original the
  -- raise_application_error came first, making the close_connection call
  -- unreachable and leaking the connection on every error
  utl_tcp.close_connection(bt_conn)
  ;
  raise_application_error(-20101, sqlerrm)
  ;
end
;
/

and read from it using netcat in another session

nc -lk 5000

produces

Sequence Number 101021
-- Spool a script that drops every database link: public links directly,
-- schema-owned links via a throw-away procedure created in each owning
-- schema (a database link can only be dropped by its owner).
set feed off head off lines 1000 pages 0 newpa none termo off trims on
spool /tmp/drop_db_links_for_sid_&_CONNECT_IDENTIFIER..sql

select 'drop public database link "'||db_link||'";'
from   dba_db_links
where  owner = 'PUBLIC'
/
-- create a procedure in each owning schema whose only job is to drop one of
-- that schema's links with execute immediate
select 'create or replace procedure '||owner||'.drop_'||replace(db_link,'.','')||' as begin execute immediate ''drop database link "'||db_link||'"''; end;'
from   dba_db_links
where  owner not in ('SYS','PUBLIC')
/
-- run each generated procedure ...
select 'exec '||owner||'.drop_'||replace(db_link,'.','')||';'
from   dba_db_links
where  owner not in ('SYS','PUBLIC')
/
-- ... and clean the procedures up again
select 'drop procedure '||owner||'.drop_'||replace(db_link,'.','')||';'
from   dba_db_links
where  owner not in ('SYS','PUBLIC')
/
spool off

set termo on lines 80 head on feed on newpa 1
prompt @/tmp/drop_db_links_for_sid_&_CONNECT_IDENTIFIER
#!/usr/bin/ksh
# ==============================================================================
# Name         : drop_database_links.ksh
# Description  : Drop all database links on an instance
#
# Parameters   : instance name
#
# Example      : ./drop_database_links.ksh orgm
#
# Modification History
# ====================
# When      Who               What
# ========= ================= ==================================================
# 09-OCT-19 Stuart Barkley    Created
# ==============================================================================

# --------------------------
# setup database environment
# --------------------------
if [[ $# -ne 1 ]]; then
    echo "ERROR: SID must be passed as argument" >&2
    exit 1
fi

# export ORAENV_ASK so oraenv (and anything it spawns) can see it
export ORAENV_ASK=NO
export ORACLE_SID="${1}"
. oraenv >/dev/null 2>&1
if [[ $? -ne 0 ]]; then
    echo "ERROR: Failed to setup environment for ${ORACLE_SID}." >&2
    exit 1
fi

TMPFILE="/tmp/dba_db_links_to_drop_${ORACLE_SID}.log"
SQLFILE="/tmp/dba_db_links_to_drop.$$.sql"

# ----------------------------------------
# build procedure to remove database links
# ----------------------------------------
# drop_dblink parses the DROP statement as the owning user via dbms_sys_sql,
# because a database link can only be dropped by its owner.
# NB: the generated script is only spooled; the "start" line is left disabled
# (rem) as a dry-run safeguard -- remove the rem to actually drop the links.
"${ORACLE_HOME}/bin/sqlplus" -s '/ as sysdba'<<-EOSQL >"${TMPFILE}" 2>&1
    whenever sqlerror continue
    set headi off newpa none feedb off verif off trims on
    create or replace procedure drop_dblink ( p_schema_name in varchar2
                                            , p_db_link     in varchar2
                                            ) as
        plsql   varchar2(4000);
        cur     number;
        v_uid   number;
        rc      number;
    begin
        select u.user_id
        into   v_uid
        from   dba_users u
        where  u.username = p_schema_name
        ;
        plsql := 'drop database link "'||p_db_link||'"';
        cur := sys.dbms_sys_sql.open_cursor;
        sys.dbms_sys_sql.parse_as_user ( c             => cur
                                       , statement     => plsql
                                       , language_flag => dbms_sql.native
                                       , userid        => v_uid
                                       );
        rc := sys.dbms_sys_sql.execute (cur);
        sys.dbms_sys_sql.close_cursor (cur);
    end;
    /

    spool ${SQLFILE}
    select 'drop public database link '||db_link||';'
    from   dba_db_links
    where  owner = 'PUBLIC'
    /
    select 'exec drop_dblink( '''||owner||''''||','||''''||db_link||''''||');'
    from   dba_db_links
    where  owner != 'PUBLIC'
    /
    spool off
rem    start ${SQLFILE}
EOSQL

# TMPFILE captured all sqlplus output; any ORA- line means something failed
if grep -q 'ORA-' "${TMPFILE}" 2>/dev/null; then
    echo "ERROR: Unable to complete dropping of database links:" >&2
    cat "${TMPFILE}"
    rm -f "${TMPFILE}" "${SQLFILE}"
    exit 1
fi
rm -f "${TMPFILE}" "${SQLFILE}"
exit 0

* borrowed from this asktom question

set serveroutput on 
begin
    dbms_output.enable(null);
    -- test every database link with "select dummy from dual@link" AS THE
    -- OWNING USER: a scheduler job named <owner>.dblink runs with the
    -- owner's privileges, which a plain sys session could not do
    for f in (select *
              from   dba_db_links
              order  by owner
              ,      db_link
             )
    loop
        dbms_scheduler.create_job ( job_name     => f.owner||'.dblink'
                                  , job_type     => 'PLSQL_BLOCK'
                                  , job_action   => 'declare '
                                                  ||'    x char;'
                                                  ||'begin '
                                                  ||'    select dummy into x from dual@'||f.db_link||';'
                                                  ||'    dbms_output.put_line('''||f.owner||' '||f.db_link||' OK'');'
                                                  ||'end;'
                                  );
        begin
            -- second argument TRUE = run synchronously so failures surface here
            dbms_scheduler.run_job (f.owner||'.dblink', true);
        exception
        when others then
            dbms_output.put_line (f.owner||' '||f.db_link||' NOK ('||sqlerrm||')');
        end;
        dbms_scheduler.drop_job (f.owner||'.dblink');
    end loop;
end;
/

Execute a script as a different user

Some things have to be done as the user and cannot be done as sys (eg. creating and dropping jobs or create and drop a database link) It is possible to login as the user by saving, changing and resetting his password (using identified by values) but using dbms_sys_sql is another option Create a PL/SQL procedure to drop a db link

-- Drop a database link belonging to another schema by parsing the DROP
-- statement as that user via dbms_sys_sql (links can only be dropped by
-- their owner).
create or replace procedure drop_dblink ( p_schema_name in varchar2
                                        , p_dblink      in varchar2
                                        ) is
    l_plsql   varchar2(1000);
    l_cur     number;
    l_uid     number;
    l_rc      number;
begin
    select  du.user_id
    into    l_uid
    from    dba_users du
    where   du.username = p_schema_name
    ;
    -- BUG FIX: the original assigned to "plsql", which is not declared
    -- (the variable is l_plsql), so the procedure could never compile
    l_plsql := 'drop database link "'||p_dblink||'"';
    l_cur := sys.dbms_sys_sql.open_cursor;
    sys.dbms_sys_sql.parse_as_user(
        c             => l_cur,
        statement     => l_plsql,
        language_flag => dbms_sql.native,
        userid        => l_uid
    );
    l_rc := sys.dbms_sys_sql.execute(l_cur);
    sys.dbms_sys_sql.close_cursor(l_cur);
end;
/

Generate the calls to the procedure with the required parameters

select 'exec drop_dblink ('''|| owner ||''''||', '||''''||db_link||''''||');'
from   dba_db_links
where  owner   != 'PUBLIC'
-- parentheses are required: AND binds tighter than OR, so without them the
-- owner filter only applied to the FTST branch and PUBLIC-owned %HDEV%
-- links slipped through
and    (db_link like '%FTST%' or db_link like '%HDEV%')
/

and this reveals the lines to be executed

exec drop_dblink ('USER1', 'DBL_FTST.WORLD');
exec drop_dblink ('USER1', 'DBL_HDEV.WORLD');

Drop the procedure when finished with it

drop procedure drop_dblink;

or do it as an anonymous block in one go…

-- One-shot version: drop a named schema's database link, prompting for the
-- link name and owner via substitution variables.
declare
    uid number;                          -- NB: shadows the built-in UID function
    sqltext varchar2(1000) := 'drop database link "&DB_LINK"';
    myint integer;
begin
    select user_id into uid from dba_users where username = '&USERNAME';
    myint:=sys.dbms_sys_sql.open_cursor();
    -- NOTE(review): no explicit execute call -- with dbms_sql-style cursors
    -- DDL is executed during the parse itself, so this presumably works;
    -- confirm, or add sys.dbms_sys_sql.execute for symmetry with the
    -- procedure version above
    sys.dbms_sys_sql.parse_as_user(myint,sqltext,dbms_sql.native,UID);
    sys.dbms_sys_sql.close_cursor(myint);
end;
/

Handling single quote marks inside SQL statements

SQL> SELECT 'D''COSTA' name FROM DUAL;

NAME
-------
D'COSTA

Alternatively, use the 10g+ quoting method:

SQL> SELECT q'$D'COSTA$' NAME FROM DUAL;

NAME
-------
D'COSTA

Run a script on all databases in /etc/oratab

export ORAENV_ASK=NO
# skip comment (#) and disabled (*) entries; field 1 of each oratab line is the SID
for db in $(awk -F: 'NF && $1 !~/[#*]/ {print $1}'  /etc/oratab)
do
    echo "$db"
    # export ORACLE_SID so child processes (sqlplus) see it -- the original
    # set it unexported and relied on oraenv exporting it; TODO confirm
    export ORACLE_SID="$db"
    . oraenv
    sqlplus -V
done

Schedule a cron job to run on the last day of each month

# runs at 23:00 on the 28th-31st, but the date test only passes when tomorrow
# is the 1st -- i.e. on the actual last day of the month
0 23 28-31 * * [ $(date -d +1day +%d) -eq 1 ] && su - oracle -c "dsmc  arch -server=SO_U_`hostname`_ORX_M_SOL '/oracle/export/PV6/expdp_PV6_D_FULL_`date +\%Y\%m\%d`*'"

How to tell if a year is a leap year or not - using cal instead of reinventing the wheel

Test: Why is Julian day 0, 17th November 1858?
Pretty comprehensive answer: here and here
It also attempts to answer the question of why the output of 'cal 09 1752' on Unix systems looks so odd!

function leap {
# Echo Y/N whether the year passed as $1 (default: current year) is a leap
# year, by counting the day numbers in February's cal(1) output.
# BUG FIX: the original pattern used '\\<' inside single quotes, which grep
# saw as a literal backslash, so it matched nothing and always returned N.
# 'grep -Ew' (whole-word match) keeps only the day-number lines.
typeset year=${1:-$(date '+%Y')}
typeset answer='N'
if [[ $(cal 2 "${year}" | grep -Ew '[0-9]{1,2}' | wc -w) -eq 29 ]]; then
    answer='Y'
fi
echo "${answer}"
}

Clone an Oracle Home

Cloning an ORACLE_HOME saves much time if this has to be done several times.

On source server

# stream the whole ORACLE_HOME into a compressed tar; the '.' keeps paths
# relative so the archive can be unpacked into any new home directory
cd $ORACLE_HOME
tar -cvf - . | gzip -c > clonehome.tar.gz

Copy the archive to the destination server.

On the destination server

mkdir -p <where you want the new ORACLE_HOME to be>
export ORACLE_HOME=<above directory>
gunzip -c clonehome.tar.gz | tar -xvf -
${ORACLE_HOME}/perl/bin/perl ${ORACLE_HOME}/clone/bin/clone.pl ORACLE_BASE="<path as far as oracle>" ORACLE_HOME="$ORACLE_HOME" ORACLE_HOME_NAME="OraDB12Home2"

As root
Check logfile for path to oraInventory

<path to oraInventory>/orainstRoot.sh
<path to ORACLE_HOME>/root.sh

Reset an expired (and/or locked) user password to the same value

Can be used, for example, to save user passwords in a test environment before they are destroyed by a copy of production.

set lines 200 pages 200 head off
col user_list for a200 head "Expired User List"

-- Rebuild "alter user ... identified by values '<hash>' account unlock" for
-- every expired/locked non-Oracle-maintained account.  spare4 holds the 11g+
-- verifier and password the legacy one; keep whichever exist (both, joined
-- by ';', when both are present).
-- BUG FIX: a space is needed after the closing quote -- the original
-- generated "...'<hash>'account unlock;", which is invalid SQL.
select 'alter user '||su.name||' identified by values '''||decode( su.spare4
                                                                 , null, decode( su.password
                                                                               , null, 'not identified'
                                                                               , su.password
                                                                               )
                                                                 , decode( su.password
                                                                         , null, su.spare4
                                                                         , su.spare4||';'||su.password
                                                                         )
                                                                 )||''' account unlock;' stmt
from   sys.user$ su
,      dba_users du
where  1=1
and    su.name              = du.username
and    du.oracle_maintained = 'N'
and    (account_status      like '%EXPIRED%' or account_status like '%LOCKED%')
order  by su.name
/

More industrialised version. Create a package.

-- DBA helper package: reset_password unlocks the users named in the
-- comma-separated p_userlist and re-applies their existing password hashes
create or replace package dba_utils
as
    procedure reset_password ( p_userlist in varchar2 );
end dba_utils;
/


create or replace package body dba_utils
as

-- Unlock and re-apply the current password hash (extracted from the users'
-- own DDL via dbms_metadata) for every user named in p_userlist, a
-- comma-separated list of usernames (no spaces around the commas).
procedure reset_password ( p_userlist in varchar2 )
is
    cursor c1 ( p_userlist in varchar2 ) is
    select username
    ,      regexp_substr ( dbms_metadata.get_ddl('USER', username), '''[^'']+''' ) pwd
    from   dba_users
    where  1=1
    -- BUG FIX: the original used "username in upper(p_userlist)", which
    -- compares the whole list as ONE string and so only ever matched a
    -- single username; wrap both sides in commas for real list membership
    and    instr( ','||upper(p_userlist)||',', ','||username||',' ) > 0
    ;
begin
    dbms_output.enable(null);
    for r1 in c1 ( p_userlist )
    loop
        begin
            execute immediate 'alter user ' || r1.username || ' account unlock';
            execute immediate 'alter user ' || r1.username || ' identified by values ' || r1.pwd;
        exception
        when others then
            dbms_output.put_line('Unable to modify user ' || r1.username);
        end;
    end loop;
end reset_password;

end dba_utils;
/

Call it

set serveroutput on
exec dba_utils.reset_password ('STUART');

shell

Alert Log scraping with ADRCI

Count the number of background processes for a particular SID

SID=RAVJDE1
# cut isolates the command column, then awk keeps lines ending in the SID.
# The SID is passed in once via -v and the anchor is built dynamically; the
# original also interpolated $SID straight into the program text, which was
# redundant and breaks if the SID contains regex metacharacters.
ps -ef|cut -c54-100|awk -v sid="$SID" '$0 ~ sid "$"'|wc -l

or …this will work with any OS (so long as the process name is at the end of the `ps -ef` or `ps aux` listing)

SID=RAVJDE1
# keep processes whose underscore-separated last field ends in the SID.
# BUG FIX: the original wrapped the pattern-action inside an extra { } block
# ('{ $NF ~ /.../ {print} }'), which is an awk syntax error; the pattern must
# appear at the top level.
ps -ef|awk -F_ -v sid="$SID" '$NF ~ sid "$"'|wc -l

Pick the columns you want in the ps listing

# -o user,pid,ppid=MOM renames the PPID column header to MOM; the second -o
# appends the full command line (args)
ps -efo user,pid,ppid=MOM -o args

Find Oracle errors in the most recent DataPump log files

# 09:00 daily: grep last night's (-mtime -1) Data Pump logs for ORA- errors and mail the result
00 09 * * * /home/tools/scripts/oracle/dosh 'find /oracle/export -name "expdp*log" -mtime -1 -exec grep ORA- {} \; -ls' | mailx -s 'Datapump errors for <customer>last night' [email protected]

Find the 10 largest directories on a particular filesystem

# AIX-style du flags: -a = report every file, -g = sizes in GB
# NOTE(review): on Linux use 'du -a --block-size=1G' or 'du -ak' -- confirm platform
du -ag /oracle | sort -nr | head -n 10

Find the 10 largest files on a particular mount point

# sort numerically on field 7 (size) of the 'find -ls' output.
# BUG FIX: the obsolete 'sort +6' key syntax has been removed from POSIX and
# errors on modern GNU sort; '-k7,7' is the standard equivalent.
find /home -size +1g -ls | sort -nr -k7,7 | head -10

or

# same idea staying on one filesystem (-xdev); '-k7,7' replaces the obsolete
# 'sort +6' key syntax, which modern GNU sort rejects
find /home -xdev -ls | sort -nr -k7,7 | head -10

Split a file in pieces using sed

Using fixed line numbers

# part1 = lines 1-12, part2 = line 13 to end of file
sed -ne '1,12p' file > text.part1
sed -ne '13,$p' file > text.part2

Using line with a /pattern/ on it

# part1: quit at the marker line, then drop the marker itself
sed -e '/hello/q' tt.dat | grep -v hello > text.part1
# part2: the inner sed prints the marker's line NUMBER, which becomes the
# start of the outer sed's 'N,$p' print range; grep drops the marker line
sed -n `sed -n '/hello/=' tt.dat`',$p' tt.dat | grep -v hello > text.part2

Split a file in pieces using Perl

# flip-flop operator: print from line 1 up to the marker / from the marker to EOF
perl -ne 'print if 1 .. /marker line/' text.dat >text.part1
perl -ne 'print if /marker line/ .. eof()' text.dat >text.part2

Find and display RMAN errors in an RMAN log file

# print each RMAN- error and everything up to the following blank line
sed -ne '/^RMAN-/,/^$/p'  RMAN_<SID>_<DATE>.log

cmd

Perl

For all files in a directory, replace a keyword (SSIIDD) in a file with another (a parameter)

# Replace the SSIIDD placeholder with ${SID} in every regular file in the
# current directory.  Glob instead of parsing `ls` (safe with spaces), skip
# non-files, and keep all expansions quoted.
for i in *
do
    [ -f "$i" ] || continue
    perl -p -i -e 's/SSIIDD/'"$SID"'/g' "$i"
done

Parse tnsnames.ora to get just the service names

from here

# an alias line starts at column 1 with something that is not a comment,
# parenthesis, or whitespace -- print only those lines
perl -ne 'print if m/^([^#()\W ][a-zA-Z.]*(?:[.][a-zA-Z]*\s?=)?)/' $TNS_ADMIN/tnsnames.ora

and use that to check the listener status

# tnsping every alias found in tnsnames.ora, stripping any trailing '='
for svc in $(perl -ne 'print if m/^([^#()\W ][a-zA-Z.]*(?:[.][a-zA-Z]*\s?=)?)/' $TNS_ADMIN/tnsnames.ora | sed -e 's/=//')
do
    tnsping $svc
done