handy_scripts
Differences
This shows you the differences between two versions of the page.
| Both sides previous revisionPrevious revision | |||
| handy_scripts [2018/12/08 12:49] – created 0.0.0.0 | handy_scripts [2024/07/15 20:30] (current) – external edit 127.0.0.1 | ||
|---|---|---|---|
| Line 1: | Line 1: | ||
| - | ====== Handy_scripts ====== | + | ==== Download java grants / permissions so they can be reissued after a database duplication |
| + | Ref [[https:// | ||
| + | < | ||
| + | TS=$(date ' | ||
| - | ======SQL====== | + | rm -f " |
| - | * [[Database Overview - from idevelopment]] [[http:// | + | |
| - | * [[Extract ddl with "dbms metadata.get ddl"]] | + | sqlplus -s / as sysdba<<' |
| - | * [[http:// | + | set echo off feed off newpa none head off lines 1000 pages 0 trims on |
| - | * [[http:// | + | column stmt format a500 word_wrapped |
| - | * [[http:// | + | select 'exec ' |
| - | =====List all instances running on a server===== | + | from ( |
| - | Sounds simple enough and indeed pre 12.1, is was as easy as either | + | |
| - | <code> | + | , |
| - | or this one is safer in case you have an underscore character in your SID name!! | + | |
| - | < | + | |
| + | | ||
| + | | ||
| + | | ||
| + | , | ||
| + | | ||
| + | , | ||
| + | select seq seq | ||
| + | , grantee | ||
| + | , to_number(substr(name, | ||
| + | , substr(name, | ||
| + | , substr(name, | ||
| + | from | ||
| + | where grantee not in (' | ||
| + | and type_name | ||
| + | ) a | ||
| + | | ||
| + | ) | ||
| + | order by seq | ||
| + | / | ||
| + | EOSQL | ||
| + | </ | ||
| + | |||
| + | ==== Use wget to download patches etc. from Oracle ==== | ||
| + | Using MOS / Metalink credentials, | ||
| + | < | ||
| + | #!/bin/sh | ||
| + | |||
| + | # | ||
| + | # Generated 6/21/20 4:57 PM | ||
| + | # Start of user configurable variables | ||
| + | # | ||
| + | LANG=C | ||
| + | export LANG | ||
| + | |||
| + | # Trap to cleanup cookie file in case of unexpected exits. | ||
| + | trap 'rm -f $COOKIE_FILE; | ||
| + | |||
| + | # SSO username | ||
| + | printf 'SSO UserName:' | ||
| + | read SSO_USERNAME | ||
| + | printf 'SSO Password:' | ||
| + | read SSO_PASSWORD | ||
| + | |||
| + | |||
| + | |||
| + | # Path to wget command | ||
| + | WGET=/ | ||
| + | |||
| + | # Log directory and file | ||
| + | LOGDIR=. | ||
| + | LOGFILE=$LOGDIR/ | ||
| + | |||
| + | # Print wget version info | ||
| + | echo "Wget version info: | ||
| + | ------------------------------ | ||
| + | $($WGET -V) | ||
| + | ------------------------------" | ||
| + | |||
| + | # Location of cookie file | ||
| + | COOKIE_FILE=$(mktemp -t wget_sh_XXXXXX) >> " | ||
| + | if [ $? -ne 0 ] || [ -z " | ||
| + | then | ||
| + | echo " | ||
| + | exit 1 | ||
| + | fi | ||
| + | echo " | ||
| + | |||
| + | # Output directory and file | ||
| + | OUTPUT_DIR=. | ||
| + | |||
| + | # | ||
| + | # End of user configurable variable | ||
| + | # | ||
| + | |||
| + | # The following command to authenticate uses HTTPS. This will work only if the wget in the environment | ||
| + | # where this script will be executed was compiled with OpenSSL. | ||
| + | # | ||
| + | # $WGET --secure-protocol=auto --save-cookies=" | ||
| + | | ||
| + | |||
| + | # Verify if authentication is successful | ||
| + | if [ $? -ne 0 ] | ||
| + | then | ||
| + | echo " | ||
| + | echo " | ||
| + | else | ||
| + | echo " | ||
| + | |||
| + | | ||
| + | |||
| + | | ||
| + | |||
| + | fi | ||
| + | |||
| + | # Cleanup | ||
| + | rm -f " | ||
| + | echo " | ||
| + | |||
| + | </ | ||
| + | ===== SQL ===== | ||
| + | * [[Database Overview - from idevelopment]] [[http:// | ||
| + | * [[Extract ddl with dbms metadata.get ddl]] | ||
| + | * [[http:// | ||
| + | * [[http:// | ||
| + | * [[http:// | ||
| + | ==== List all instances running on a server ==== | ||
| + | Sounds simple enough and indeed pre 12.1, is was as easy as | ||
| + | < | ||
| + | RUNNING_SIDS=" | ||
| + | </ | ||
| but now we have the added complication of CDB/PDB (container / pluggable databases) so a fair bit more work is needed! | but now we have the added complication of CDB/PDB (container / pluggable databases) so a fair bit more work is needed! | ||
| - | < | + | < |
| + | # | ||
| + | # ============================================================================== | ||
| + | # Name : list_server_db | ||
| + | # Description | ||
| + | # | ||
| + | # Parameters | ||
| + | # | ||
| + | # Notes : none | ||
| + | # | ||
| + | # Modification History | ||
| + | # ==================== | ||
| + | # When Who | ||
| + | # ========= ================= ================================================== | ||
| + | # 21-DEC-16 Stuart Barkley | ||
| + | # ============================================================================== | ||
| + | for db in $(ps -ef | grep [[p]]mon|awk -F_ ' | ||
| + | do | ||
| + | export ORACLE_SID=${db} | ||
| + | ORAENV_ASK=NO | ||
| + | . oraenv >/ | ||
| + | sqlplus -s / as sysdba<< | ||
| + | set serveroutput on feed off pages 0 head off newpa none | ||
| + | declare | ||
| + | e_col_not_found exception; | ||
| + | e_not_mounted | ||
| + | pragma | ||
| + | pragma | ||
| + | l_db_name | ||
| + | l_database_role varchar2(20); | ||
| + | l_cdb | ||
| + | l_status | ||
| + | type varchar2_tab is table of varchar2(240) index by binary_integer; | ||
| + | t_pdbs | ||
| + | begin | ||
| + | begin | ||
| + | dbms_output.enable(null); | ||
| + | execute immediate ' | ||
| + | if l_cdb = ' | ||
| + | l_status := ' | ||
| + | execute immediate ' | ||
| + | for i in nvl(t_pdbs.first, | ||
| + | loop | ||
| + | dbms_output.put_line (t_pdbs(i)||': | ||
| + | end loop; | ||
| + | else | ||
| + | l_status := ' | ||
| + | end if; | ||
| + | if l_database_role like ' | ||
| + | l_status := ' | ||
| + | end if; | ||
| + | exception | ||
| + | when e_not_mounted then | ||
| + | l_status := 'Not Mounted'; | ||
| + | when e_col_not_found then | ||
| + | l_status := ' | ||
| + | end; | ||
| + | |||
| + | dbms_output.put_line (' | ||
| + | |||
| + | end; | ||
| + | / | ||
| + | EOSQL | ||
| + | done | ||
| + | </ | ||
| - | =====Run a command or script on all databases in tnsnames.ora===== | + | ==== Run a command or script on all databases in tnsnames.ora ==== |
| - | Very handy if you have a global tnsnames.ora accessible to all clients.<br /> | + | Very handy if you have a global tnsnames.ora accessible to all clients. |
| - | So from a client install, this has access to all databases.<br /> | + | So from a client install, this has access to all databases. |
| - | Any files created can be created on the client so no need to scp to all the machines to recover spool files etc.<br /> | + | Any files created can be created on the client so no need to scp to all the machines to recover spool files etc. |
| Downside: Cannot connect as sysdba across tns so password will need to be known for whatever user is used. | Downside: Cannot connect as sysdba across tns so password will need to be known for whatever user is used. | ||
| - | < | + | < |
| + | #!/bin/ksh | ||
| + | TNSNAMES=" | ||
| + | [[ ! -r " | ||
| - | =====Run a command or script on all databases | + | SIDLIST=$(perl -ne 'print " |
| - | <code>4@@</code> | + | for SID in ${SIDLIST} |
| - | < | + | do |
| + | echo " | ||
| + | ls -al | grep " | ||
| + | if [[ $? -ne 0 ]]; then | ||
| + | echo ' | ||
| + | else | ||
| + | echo " | ||
| + | fi | ||
| + | done | ||
| + | </ | ||
| - | =====Run | + | ==== Run a command |
| - | It can be used to execute any sql command | + | < |
| - | * not got this working properly yet * | + | # |
| - | < | + | # ============================================================================== |
| + | # Name : all_db_do | ||
| + | # Description | ||
| + | # specified SQL command - be careful! | ||
| + | # | ||
| + | # Parameters | ||
| + | # -q(uiet) - just the results | ||
| + | # -i(nstance list) - if provided, only run on these instances | ||
| + | # -f(ilename) | <quoted SQL command | ||
| + | # | ||
| + | # Examples | ||
| + | # all_db_do -v 'alter system switch logfile;' | ||
| + | # all_db_do -f sessions.sql | ||
| + | # all_db_do -q -i "orgt rpad reportt" | ||
| + | # all_db_do -v "alter system set local_listener=' | ||
| + | # all_db_do " | ||
| + | # | ||
| + | # Notes : If -b is specified | ||
| + | # logfile | ||
| + | # | ||
| + | # Modification History | ||
| + | # ==================== | ||
| + | # When Who | ||
| + | # ========= ================= ================================================== | ||
| + | # 14-JAN-13 Stuart Barkley | ||
| + | # 25-FEB-13 Stuart Barkley | ||
| + | # 21-MAR-13 Stuart Barkley | ||
| + | # 29-APR-13 Stuart Barkley | ||
| + | # 24-MAY-13 Stuart Barkley | ||
| + | # 23-JUN-16 Stuart Barkley | ||
| + | # 23-SEP-21 Stuart Barkley | ||
| + | # ============================================================================== | ||
| - | =====Run a script on all databases listed in tnsnames.ora===== | + | PROGNAME=$(basename $0) |
| - | Can run an SQL command file on all SID/SERVICE found in a tnsnames.ora file. I use this from a client laptop having a global tns file on it.<br /> | + | OS=$(uname -s) |
| + | |||
| + | AWK=$(which awk) | ||
| + | GREP=$(which grep) | ||
| + | if [[ " | ||
| + | AWK=/ | ||
| + | GREP=/ | ||
| + | fi | ||
| + | |||
| + | if [[ ! -r /etc/oratab ]]; then | ||
| + | if [[ " | ||
| + | echo " | ||
| + | else | ||
| + | echo " | ||
| + | fi | ||
| + | fi | ||
| + | |||
| + | # ------------------------- | ||
| + | # get the arguments, if any | ||
| + | # ------------------------- | ||
| + | unset BACKGROUND VERBOSE SIDLIST | ||
| + | BACKGROUND=false | ||
| + | while getopts " | ||
| + | do | ||
| + | case " | ||
| + | b) BACKGROUND=true;; | ||
| + | t) TNS=' | ||
| + | v) VERBOSE=1;; | ||
| + | q) QUIET=" | ||
| + | f) FILE2RUN=${OPTARG};; | ||
| + | i) SIDLIST=${OPTARG};; | ||
| + | *) echo " | ||
| + | esac | ||
| + | done | ||
| + | shift $((OPTIND-1)) | ||
| + | |||
| + | # ---------------------------------------------------------------------------------- | ||
| + | # if no instances supplied, take all those where the restart parameter is set to " | ||
| + | # ---------------------------------------------------------------------------------- | ||
| + | if [[ " | ||
| + | SIDLIST=" | ||
| + | fi | ||
| + | |||
| + | if [[ -z ${FILE2RUN} ]]; then | ||
| + | # no file, need an SQL supplied | ||
| + | if [[ -z $1 ]]; then | ||
| + | echo " | ||
| + | echo "eg: $0 -f sessions.sql" | ||
| + | echo "eg: $0 -v 'alter system switch logfile;'" | ||
| + | exit 1 | ||
| + | fi | ||
| + | SQL=$1 | ||
| + | elif [[ -s ${FILE2RUN} ]]; then | ||
| + | # file supplied and exists | ||
| + | SQL=" | ||
| + | else | ||
| + | # file supplied, does it exist | ||
| + | echo "File ${FILE2RUN} is empty or does not exist" | ||
| + | echo " | ||
| + | echo "eg: $0 -f sessions.sql" | ||
| + | echo "eg: $0 -v 'alter system switch logfile;'" | ||
| + | exit 1 | ||
| + | fi | ||
| + | |||
| + | echo " | ||
| + | [[ $? -ne 0 ]] && PATH="/ | ||
| + | |||
| + | ORAENV_ASK=NO | ||
| + | for db in ${SIDLIST} | ||
| + | do | ||
| + | if ( [[ $1 != " | ||
| + | [[ ! -z " | ||
| + | continue | ||
| + | fi | ||
| + | echo | ||
| + | [[ ! -z " | ||
| + | export ORACLE_SID=$db | ||
| + | . oraenv >/ | ||
| + | [[ $? -ne 0 ]] && echo " | ||
| + | case " | ||
| + | (false) | ||
| + | " | ||
| + | define SID=" | ||
| + | col comp_name format a50 | ||
| + | col value for a40 | ||
| + | col filepath for a200 | ||
| + | set lines 1000 pages 100 $QUIET | ||
| + | $SQL | ||
| + | EOSQL | ||
| + | true | ||
| + | ;; | ||
| + | (true) | ||
| + | " | ||
| + | define SID=" | ||
| + | col comp_name format a50 | ||
| + | col value for a40 | ||
| + | col filepath for a200 | ||
| + | set lines 1000 pages 100 $QUIET | ||
| + | $SQL | ||
| + | EOSQL | ||
| + | true | ||
| + | ;; | ||
| + | (*) | ||
| + | echo " | ||
| + | false | ||
| + | ;; | ||
| + | esac | ||
| + | [[ ! -z " | ||
| + | done | ||
| + | wait | ||
| + | echo "INFO: All done" | ||
| + | </ | ||
| + | < | ||
| + | all_db_do " | ||
| + | |||
| + | all_db_do " | ||
| + | |||
| + | all_db_do " | ||
| + | |||
| + | all_db_do 'alter system set log_archive_dest_1=" | ||
| + | </ | ||
| + | |||
| + | ==== Run an SQL command on behalf of another user ==== | ||
| + | It can be used to execute any sql command as a specified user provided this procedure is created in sys schema. | ||
| + | * not got this working properly yet * | ||
| + | < | ||
| + | set serveroutput on | ||
| + | create or replace procedure run_sql_as_user ( p_schema_name in varchar2 | ||
| + | , p_sql in varchar2 | ||
| + | ) is | ||
| + | l_user_id dba_users.user_id%type; | ||
| + | l_cursor | ||
| + | l_rc number; | ||
| + | begin | ||
| + | dbms_output.enable(null); | ||
| + | begin | ||
| + | select u.user_id | ||
| + | into | ||
| + | from | ||
| + | where u.username = p_schema_name; | ||
| + | exception | ||
| + | when no_data_found then | ||
| + | raise_application_error (-20001, ' | ||
| + | end; | ||
| + | l_cursor := dbms_sys_sql.open_cursor; | ||
| + | dbms_sys_sql.parse_as_user ( c => l_cursor | ||
| + | , statement | ||
| + | , language_flag => dbms_sql.native | ||
| + | , userid | ||
| + | ); | ||
| + | l_rc := dbms_sys_sql.execute(l_cursor); | ||
| + | dbms_sys_sql.close_cursor(l_cursor); | ||
| + | exception | ||
| + | when others then | ||
| + | raise_application_error (-20001, ' | ||
| + | end; | ||
| + | / | ||
| + | |||
| + | </ | ||
| + | |||
| + | ==== Run a script on all databases listed in tnsnames.ora ==== | ||
| + | Can run an SQL command file on all SID/SERVICE found in a tnsnames.ora file. I use this from a client laptop having a global tns file on it. | ||
| This way it's not necessary to copy the file onto every host before executing it. Any files created by the script will be created on the client. | This way it's not necessary to copy the file onto every host before executing it. Any files created by the script will be created on the client. | ||
| - | < | + | < |
| + | #!/bin/ksh | ||
| - | =====List invalid objects===== | + | TNSNAMES=" |
| + | |||
| + | [[ ! -r " | ||
| + | |||
| + | SIDLIST=$(perl -ne 'print " | ||
| + | for SID in ${SIDLIST} | ||
| + | do | ||
| + | echo " | ||
| + | ls -al | grep " | ||
| + | if [[ $? -ne 0 ]]; then | ||
| + | echo ' | ||
| + | else | ||
| + | echo " | ||
| + | fi | ||
| + | done | ||
| + | </ | ||
| + | |||
| + | ==== List invalid objects ==== | ||
| + | < | ||
| set lines 200 | set lines 200 | ||
| set pages 200 | set pages 200 | ||
| Line 45: | Line 445: | ||
| | | ||
| | | ||
| - | =====Recompile all invalid objects===== | + | </ |
| + | ==== Recompile all invalid objects ==== | ||
| @?/ | @?/ | ||
| - | =====See how much space is left in the flash recovery area (FRA)===== | + | ==== See how much space is left in the flash recovery area (FRA) ==== |
| - | < | + | < |
| + | select name | ||
| + | , to_char (space_limit, | ||
| + | , to_char (space_limit - space_used + space_reclaimable | ||
| + | , ' | ||
| + | as space_available | ||
| + | , round ( (space_used - space_reclaimable) / space_limit * 100, 1) | ||
| + | as pct_full | ||
| + | from | ||
| + | </ | ||
| or | or | ||
| - | < | + | < |
| - | =====How far back can we flashback?===== | + | # ============================================== |
| - | < | + | # Check FRA to see how much space is reclaimable |
| + | # ============================================== | ||
| + | fra_reclaim_check() { | ||
| + | |||
| + | LOGFILE=${RMAN_LogDir}/ | ||
| + | sqlplus "/ as sysdba" | ||
| + | set vefify off echo off heading off newpage none linesize 1000 echo off | ||
| + | select ' | ||
| + | '; | ||
| + | '; | ||
| + | '; | ||
| + | from | ||
| + | / | ||
| + | EOSQL | ||
| + | cat / | ||
| + | rm / | ||
| + | |||
| + | } | ||
| + | </ | ||
| + | ==== How far back can we flashback? ==== | ||
| + | < | ||
| + | col time_now | ||
| + | col time_oldest_flashback for a25 | ||
| + | select to_char(sysdate,' | ||
| + | , to_char(fdl.oldest_flashback_time, | ||
| + | , round((sysdate-fdl.oldest_flashback_time)*24, | ||
| + | from | ||
| + | , v$flashback_database_log fdl | ||
| + | / | ||
| + | </ | ||
| - | =====DBA privs tables===== | + | ==== DBA privs tables ==== |
| + | < | ||
| | | ||
| | | ||
| Line 66: | Line 506: | ||
| | | ||
| | | ||
| + | </ | ||
| - | =====Schedule a shell job===== | + | ==== Schedule a shell job ==== |
| + | < | ||
| begin | begin | ||
| | | ||
| Line 77: | Line 519: | ||
| end; | end; | ||
| / | / | ||
| - | =====Start the job===== | + | </ |
| + | ==== Start the job ==== | ||
| + | < | ||
| begin | begin | ||
| | | ||
| Line 84: | Line 528: | ||
| ); | ); | ||
| / | / | ||
| + | </ | ||
| - | =====What statement is a user running?===== | + | ==== What SQL statement is a user running? ==== |
| - | < | + | < |
| - | =====See what statements all users are running===== | + | select a.sid |
| + | , a.serial# | ||
| + | , b.sql_text | ||
| + | from | ||
| + | , v$sqlarea b | ||
| + | where a.sql_address = b.address | ||
| + | and a.username | ||
| + | / | ||
| + | </ | ||
| + | ==== See what SQL statements all users are running ==== | ||
| Useful to see what is happening when process limit is reached | Useful to see what is happening when process limit is reached | ||
| - | < | + | < |
| + | set lines 300 pages 1000 | ||
| - | =====A logminer session===== | + | col username for a10 |
| + | col command for 999 head " | ||
| + | col lockwait for a10 | ||
| + | col status for a8 | ||
| + | col schemaname for a12 | ||
| + | col osuser for a10 | ||
| + | col process for a10 | ||
| + | col program for a20 | ||
| + | col machine for a10 | ||
| + | col action for a10 | ||
| + | col module for a10 | ||
| + | col identifier for a10 | ||
| + | col event for a27 | ||
| + | col state for a8 | ||
| + | col service_name for a10 | ||
| + | col serial# for 999999 | ||
| + | |||
| + | select a.username | ||
| + | , a.command | ||
| + | , a.lockwait | ||
| + | , a.status | ||
| + | , a.schemaname | ||
| + | , a.osuser | ||
| + | , a.process | ||
| + | , a.machine | ||
| + | , a.program | ||
| + | , a.type | ||
| + | --, a.sql_exec_start | ||
| + | --, a.plsql_object_id | ||
| + | --, a.plsql_subprogram_id | ||
| + | --, a.module | ||
| + | --, a.action | ||
| + | , a.logon_time | ||
| + | , a.event | ||
| + | , a.wait_time | ||
| + | , a.seconds_in_wait | ||
| + | , a.state | ||
| + | --, a.service_name | ||
| + | , a.serial# | ||
| + | , b.sql_text | ||
| + | from | ||
| + | , v$sqlarea b | ||
| + | where a.sql_address | ||
| + | order by a.logon_time desc | ||
| + | / | ||
| + | |||
| + | |||
| + | set lines 80 | ||
| + | </ | ||
| + | |||
| + | ==== A logminer session ==== | ||
| Mine all SQL statements in a 10 minute period | Mine all SQL statements in a 10 minute period | ||
| - | < | + | < |
| - | =====Huge Pages===== | + | sqlplus / as sysdba |
| - | < | + | alter session set nls_date_format = ' |
| + | execute dbms_logmnr.start_logmnr ( starttime => ' | ||
| + | , endtime | ||
| + | , options | ||
| + | ); | ||
| + | create table mycontents as select * from v$logmnr_contents; | ||
| + | execute dbms_logmnr.end_logmnr(); | ||
| + | |||
| + | select * from mycontents; | ||
| + | |||
| + | drop table mycontents; | ||
| + | </ | ||
| + | ==== Huge Pages ==== | ||
| + | < | ||
| + | # | ||
| + | # | ||
| + | # hugepages_settings.sh | ||
| + | # | ||
| + | # Linux bash script to compute values for the | ||
| + | # recommended HugePages/ | ||
| + | # | ||
| + | # Note: This script does calculation for all shared memory | ||
| + | # segments available when the script is run, no matter it | ||
| + | # is an Oracle RDBMS shared memory segment or not. | ||
| + | # Check for the kernel version | ||
| + | KERN=`uname -r | awk -F. '{ printf(" | ||
| + | ", | ||
| + | # Find out the HugePage size | ||
| + | HPG_SZ=`grep Hugepagesize / | ||
| + | # Start from 1 pages to be on the safe side and guarantee 1 free HugePage | ||
| + | NUM_PG=1 | ||
| + | # Cumulative number of pages required to handle the running shared memory segments | ||
| + | for SEG_BYTES in `ipcs -m | awk {' | ||
| + | do | ||
| + | | ||
| + | if [[ $MIN_PG -gt 0 ]]; then | ||
| + | NUM_PG=`echo " | ||
| + | fi | ||
| + | done | ||
| + | # Finish with results | ||
| + | case $KERN in | ||
| + | ' | ||
| + | echo " | ||
| + | ' | ||
| + | *) echo " | ||
| + | esac | ||
| + | # End | ||
| + | </ | ||
| - | =====Pretty longops listing===== | + | ==== Pretty longops listing ==== |
| - | < | + | < |
| - | < | + | set lines 2000 pages 1000 |
| + | col opname for a34 head " | ||
| + | col message for a60 | ||
| + | col perc_done for a10 head " | ||
| + | col started for a18 | ||
| + | col killer for a15 | ||
| + | col mins_busy head "Mins busy" | ||
| + | col mins_left head "Mins left" | ||
| + | select sid||',' | ||
| + | , opname | ||
| + | , message | ||
| + | , round ((sofar/ | ||
| + | , to_char(start_time,' | ||
| + | , floor(elapsed_seconds/ | ||
| + | , ceil(time_remaining/ | ||
| + | from | ||
| + | where 1=1 | ||
| + | and sofar != totalwork | ||
| + | and totalwork != 0 | ||
| + | and opname | ||
| + | / | ||
| + | </ | ||
| + | < | ||
| + | select * from v$session where module=' | ||
| + | </ | ||
| or attach to the impdp job and it shows percentage done. | or attach to the impdp job and it shows percentage done. | ||
| - | <code> | + | < |
| - | =====Apply | + | Get the SID from v$session_longops and plug it into v$session to check the SQL command details. |
| - | < | + | |
| - | =====Show how much archivelog data is generated per day===== | + | Current Running SQLs |
| + | -------------------- | ||
| + | set pages 1000 lines 2000 | ||
| + | col program | ||
| + | col sql_text format a130 | ||
| + | |||
| + | select s.sid | ||
| + | , s.status | ||
| + | , s.last_call_et | ||
| + | , s.program | ||
| + | , sa.sql_id | ||
| + | , sa.sql_text | ||
| + | from | ||
| + | , v$sqlarea sa | ||
| + | where s.sql_id | ||
| + | and s.sid = '& | ||
| + | / | ||
| + | |||
| + | set pages 1000 lines 2000 | ||
| + | col USERNAME for a10 | ||
| + | col OSUSER | ||
| + | col MACHINE | ||
| + | |||
| + | select s.sid | ||
| + | , s.serial# | ||
| + | , p.spid | ||
| + | , s.username | ||
| + | , s.osuser | ||
| + | , s.status | ||
| + | , s.process fg_pid | ||
| + | , s.longon_time | ||
| + | , s.machine | ||
| + | , p.spid bg_pid | ||
| + | from | ||
| + | , gv$process p | ||
| + | where s.addr = p.addr | ||
| + | and s.sid = '& | ||
| + | / | ||
| + | |||
| + | $ps -ef | grep < | ||
| + | |||
| + | set pages 1000 lines 2000 | ||
| + | SELECT INST_ID | ||
| + | , SID | ||
| + | , SERIAL# | ||
| + | , SQL_ID | ||
| + | , USERNAME | ||
| + | , PROGRAM | ||
| + | , MACHINE | ||
| + | , SERVICE_NAME | ||
| + | FROM | ||
| + | WHERE SID IN ('< | ||
| + | / | ||
| + | Active Running SQLs | ||
| + | -------------------- | ||
| + | set pages 1000 lines 2000 | ||
| + | col SPID for a10 | ||
| + | col PROGRAM | ||
| + | col OSUSER | ||
| + | col ACTION | ||
| + | col EVENT for a25 | ||
| + | col SQL_TEXT for a25 | ||
| + | col MACHINE | ||
| + | col P1TEXT | ||
| + | col P2TEXT | ||
| + | col P3TEXT | ||
| + | SELECT b.sid | ||
| + | , b.serial# | ||
| + | , a.spid | ||
| + | , b.sql_id | ||
| + | , b.program | ||
| + | , b.osuser | ||
| + | , b.machine | ||
| + | , b.type | ||
| + | , b.event | ||
| + | , b.action | ||
| + | , b.p1text | ||
| + | , b.p2text | ||
| + | , b.p3text | ||
| + | , b.state | ||
| + | , c.sql_text | ||
| + | , b.logon_time | ||
| + | FROM | ||
| + | , v$session b | ||
| + | , v$sqltext c | ||
| + | WHERE a.addr | ||
| + | AND b.sql_hash_value = c.hash_value | ||
| + | AND b.STATUS | ||
| + | AND b.sid = '& | ||
| + | ORDER BY a.spid | ||
| + | , c.piece | ||
| + | / | ||
| + | |||
| + | OR - Use the below Query | ||
| + | |||
| + | Longops.sql | ||
| + | ----------- | ||
| + | set pages 50000 lines 32767 | ||
| + | col OPNAME for a10 | ||
| + | col SID form 9999 | ||
| + | col SERIAL form 9999999 | ||
| + | col PROGRAM for a10 | ||
| + | col USERNAME for a10 | ||
| + | col SQL_TEXT for a40 | ||
| + | col START_TIME for a10 | ||
| + | col LAST_UPDATE_TIME for a10 | ||
| + | col TARGET for a25 | ||
| + | col MESSAGE for a25 | ||
| + | |||
| + | alter session set nls_date_format = ' | ||
| + | |||
| + | SELECT l.inst_id, | ||
| + | FROM gv$session_longops l | ||
| + | LEFT OUTER JOIN v$sql s on s.hash_value=l.sql_hash_value and s.address=l.sql_address and s.child_number=0 | ||
| + | WHERE l.OPNAME NOT LIKE ' | ||
| + | / | ||
| + | |||
| + | </ | ||
| + | ==== Apply the database patch after the ORACLE_HOME has been patched ==== | ||
| + | < | ||
| + | ORAENV_ASK=NO | ||
| + | export ORACLE_SID=$1 | ||
| + | . oraenv | ||
| + | |||
| + | sqlplus /nolog << | ||
| + | connect / as sysdba | ||
| + | startup | ||
| + | @?/ | ||
| + | @?/ | ||
| + | purge dba_recyclebin; | ||
| + | |||
| + | prompt check patch apply... | ||
| + | set lines 150 | ||
| + | col action_time for a50 | ||
| + | col comp_name for a50 | ||
| + | col comments for a50 | ||
| + | |||
| + | select action_time, | ||
| + | select comp_name, | ||
| + | exit | ||
| + | EOSQL | ||
| + | </ | ||
| + | |||
| + | ==== Show how much archivelog data is generated per day ==== | ||
| A companion script for 'how many log switches in a day' | A companion script for 'how many log switches in a day' | ||
| - | < | + | < |
| + | col orderby noprint | ||
| + | select trunc(first_time) orderby | ||
| + | , to_char(first_time,' | ||
| + | , round(sum(bytes/ | ||
| + | from ( | ||
| + | select unique (blocks * block_size) bytes, | ||
| + | thread#, | ||
| + | sequence#, | ||
| + | resetlogs_change#, | ||
| + | first_time | ||
| + | from v$archived_log | ||
| + | ) | ||
| + | group by trunc(first_time) | ||
| + | , to_char(first_time, | ||
| + | order by trunc(first_time) desc | ||
| + | / | ||
| + | </ | ||
| - | =====Abort a hanging database even when sysdba cannot connect===== | + | ==== Abort a hanging database even when sysdba cannot connect ==== |
| Unable to connect to database with sqlplus / as sysdba because audit file cannot be created? | Unable to connect to database with sqlplus / as sysdba because audit file cannot be created? | ||
| - | * [[http:// | + | * [[http:// |
| - | * [[http:// | + | * [[http:// |
| - | Instead of just killing the pmon process, there is a kinder (and more useful) way to kill the database.<br /> | + | Instead of just killing the pmon process, there is a kinder (and more useful) way to kill the database. |
| - | Use the hidden, undocumented ' | + | Use the hidden, undocumented ' |
| It can be very useful for hang analysis so the possible cause can be found after the database has been restarted. | It can be very useful for hang analysis so the possible cause can be found after the database has been restarted. | ||
| - | < | + | < |
| + | sqlplus -prelim / as sysdba | ||
| + | oradebug unlimit | ||
| + | oradebug hanganalyze 3 | ||
| + | oradebug setmypid | ||
| + | oradebug dump ashdumpseconds 30 | ||
| + | oradebug dump systemstate 10 -- or oradebug dump systemstate 266 | ||
| + | oradebug tracefile_name | ||
| + | shutdown abort | ||
| + | </ | ||
| - | =====Lists Oracle session with kill session statement all ready to go===== | + | ==== Lists Oracle session with kill session statement all ready to go ==== |
| disconnect attempts to kill the o/s processes too. kill doesn' | disconnect attempts to kill the o/s processes too. kill doesn' | ||
| - | < | + | < |
| + | col killer for a60 | ||
| + | col osuser for a12 | ||
| + | col logon for a23 | ||
| + | col spid for a8 | ||
| + | col username for a15 | ||
| + | col program for a50 | ||
| + | set lines 2000 | ||
| + | select 'alter system disconnect session ''' | ||
| + | , s.inst_id | ||
| + | , to_char(s.logon_time,' | ||
| + | , sci.osuser | ||
| + | , p.spid | ||
| + | , s.username | ||
| + | , s.program | ||
| + | from | ||
| + | , gv$session_connect_info sci | ||
| + | , gv$process p | ||
| + | where 1=1 | ||
| + | and p.addr | ||
| + | and p.inst_id | ||
| + | and s.sid = sci.sid | ||
| + | and s.serial# | ||
| + | and s.type | ||
| + | and s.status | ||
| + | order by s.logon_time desc | ||
| + | / | ||
| - | =====How much space is being used by a guaranteed restore point?===== | ||
| - | < | ||
| - | =====Get statistics for LOB columns in a table===== | + | KILLER |
| - | < | + | ------------------------------------------------------------ ---------- ----------------------- ------------ -------- --------------- -------------------------------------------------- |
| + | alter system disconnect session ' | ||
| + | alter system disconnect session ' | ||
| + | alter system disconnect session ' | ||
| + | alter system disconnect session ' | ||
| + | </ | ||
| - | =====badprivs.sql===== | + | ==== Create a guaranteed restore point ==== |
| + | < | ||
| + | create restore point beginning guarantee flashback database; | ||
| + | </ | ||
| + | |||
| + | ==== How much space is being used by a guaranteed restore point? ==== | ||
| + | < | ||
| + | show parameter recovery | ||
| + | set numwi 15 | ||
| + | set lines 200 | ||
| + | col name for a40 | ||
| + | col time for a23 | ||
| + | col gigs_limit head "Max (but check df -g also!)" | ||
| + | |||
| + | select name | ||
| + | , time | ||
| + | , round(storage_size/ | ||
| + | from | ||
| + | where 1=1 | ||
| + | and guarantee_flashback_database = ' | ||
| + | / | ||
| + | |||
| + | select * from v$flash_recovery_area_usage | ||
| + | / | ||
| + | |||
| + | select name | ||
| + | , round(space_used/ | ||
| + | , round(space_limit/ | ||
| + | , space_reclaimable | ||
| + | , number_of_files | ||
| + | from v$recovery_file_dest | ||
| + | / | ||
| + | </ | ||
| + | |||
| + | ==== Get statistics for LOB columns in a table ==== | ||
| + | < | ||
| + | select | ||
| + | max(dbms_lob.getlength(lob_content)) max_len | ||
| + | , avg(dbms_lob.getlength(lob_content)) avg_len | ||
| + | , min(dbms_lob.getlength(lob_content)) min_len | ||
| + | , sum(case when dbms_lob.getlength(lob_content) <= 4000 then 1 | ||
| + | else 0 end) "< | ||
| + | , sum(case when dbms_lob.getlength(lob_content) > 4000 then 1 | ||
| + | else 0 end) "> | ||
| + | , sum(case when dbms_lob.getlength(lob_content) is null then 1 else | ||
| + | 0 end) "is null" | ||
| + | from & | ||
| + | / | ||
| + | </ | ||
| + | |||
| + | ==== badprivs.sql ==== | ||
| Check to see if any ordinary users have privileges / access / grants / rights they should not have | Check to see if any ordinary users have privileges / access / grants / rights they should not have | ||
| - | < | + | < |
| + | select grantee, privilege, admin_option | ||
| + | from | ||
| + | where ( | ||
| + | or | ||
| + | or | ||
| + | ) | ||
| + | and grantee not in (' | ||
| + | ' | ||
| + | ' | ||
| + | ' | ||
| + | ' | ||
| + | </ | ||
| - | =====Generate a list of tablespaces ready for a migration===== | + | ==== Generate a list of tablespaces ready for a migration ==== |
| - | < | + | < |
| - | =====List the privileges assigned to users on directories===== | + | set echo off |
| + | set lines 300 | ||
| + | set pages 0 | ||
| + | Set headi off | ||
| + | set feedb off | ||
| + | set long 32000 | ||
| + | |||
| + | spool migration_create_target_tablespaces.sql | ||
| + | |||
| + | exec dbms_metadata.set_transform_param(dbms_metadata.session_transform,' | ||
| + | select DBMS_METADATA.GET_DDL(' | ||
| + | / | ||
| + | |||
| + | spool off | ||
| + | </ | ||
| + | ==== List the privileges assigned to users on directories ==== | ||
| table_name is the name of the directory... | table_name is the name of the directory... | ||
| - | < | + | < |
| + | select grantor | ||
| + | , grantee | ||
| + | , table_schema | ||
| + | , table_name | ||
| + | , privilege | ||
| + | from | ||
| + | where table_name = ' | ||
| + | / | ||
| + | </ | ||
| - | =====List the privileges assigned to users on packages/ | + | ==== List the privileges assigned to users on packages/ |
| table_name is the name of the package... | table_name is the name of the package... | ||
| - | < | + | < |
| + | select grantor | ||
| + | , grantee | ||
| + | , table_schema | ||
| + | , table_name | ||
| + | , privilege | ||
| + | from | ||
| + | where upper(table_name) like upper(' | ||
| + | / | ||
| + | </ | ||
| + | |||
| + | ==== List users with DBA privilege (role) ==== | ||
| + | < | ||
| + | all_db_do -q " | ||
| + | </ | ||
| - | =====List the system privileges assigned to a user (used to copy user as or clone user as)===== | + | ==== List the system privileges assigned to a user (used to copy user as or clone user as) ==== |
| Maybe this one is better [[Extract ddl with "dbms metadata.get ddl"]] | Maybe this one is better [[Extract ddl with "dbms metadata.get ddl"]] | ||
| + | < | ||
| | | ||
| FROM ( | FROM ( | ||
| Line 158: | Line 1032: | ||
| START WITH grantee IS NULL | START WITH grantee IS NULL | ||
| | | ||
| + | </ | ||
| or | or | ||
| + | < | ||
| | | ||
| FROM ( | FROM ( | ||
| Line 180: | Line 1056: | ||
| AND username != ' | AND username != ' | ||
| OR grantee=' | OR grantee=' | ||
| + | </ | ||
| or | or | ||
| for a migration from 10g... | for a migration from 10g... | ||
| - | < | + | < |
| + | set headi off | ||
| + | set feedb off | ||
| + | set long 9999999 | ||
| + | set pages 0 | ||
| + | set lines 300 | ||
| + | |||
| + | spool migration_create_target_users_grants.sql | ||
| + | |||
| + | select dbms_metadata.get_ddl(' | ||
| + | from dba_users | ||
| + | where 1=1 | ||
| + | and username like upper(' | ||
| + | / | ||
| + | |||
| + | select 'grant ' | ||
| + | from | ||
| + | where 1=1 | ||
| + | and grantee like upper(' | ||
| + | / | ||
| + | |||
| + | select 'grant ' | ||
| + | from | ||
| + | where 1=1 | ||
| + | and grantee like upper(' | ||
| + | / | ||
| + | |||
| + | spool off | ||
| + | </ | ||
| or for a migration on 11g...! | or for a migration on 11g...! | ||
| - | < | + | < |
| + | set head off | ||
| + | set pages 0 | ||
| + | set long 9999999 | ||
| + | col ddl for a1000 | ||
| - | =====Move datafiles between filesystems===== | ||
| - | * [[https:// | ||
| - | * [[https:// | ||
| - | * [[https:// | ||
| - | ====Set the relevant datafile offline==== | ||
| - | < | ||
| - | ====Rename the files on the filesystem using o/s commands==== | ||
| - | < | ||
| - | ====Run the relevant alter commands to inform the control files of the new locations==== | ||
| - | < | ||
| - | ====Recover the datafiles==== | + | SELECT DBMS_METADATA.GET_DDL(' |
| - | < | + | FROM DBA_USERS |
| - | ====Set the relevant datafile online==== | + | where 1=1 |
| - | < | + | and default_tablespace not in (' |
| - | ====Check all is well with the datafiles by using an appropriate statement==== | + | and upper(username) like ' |
| - | < | + | UNION ALL |
| - | ====Rename a datafile on 12c==== | + | SELECT DBMS_METADATA.GET_GRANTED_DDL(' |
| - | If you're lucky enough to be on 12c, it's one command - no need to offline the datafile! | + | FROM DBA_USERS |
| - | < | + | where 1=1 |
| + | and default_tablespace not in (' | ||
| + | and upper(username) like ' | ||
| + | UNION ALL | ||
| + | SELECT DBMS_METADATA.GET_GRANTED_DDL(' | ||
| + | FROM DBA_USERS | ||
| + | where 1=1 | ||
| + | and default_tablespace not in (' | ||
| + | and upper(username) like ' | ||
| + | UNION ALL | ||
| + | SELECT DBMS_METADATA.GET_GRANTED_DDL(' | ||
| + | FROM DBA_USERS | ||
| + | where 1=1 | ||
| + | and default_tablespace not in (' | ||
| + | and upper(username) like ' | ||
| - | =====Pass parameter/ | + | spool migration_create_users_grants.sql |
| + | / | ||
| + | spool off | ||
| + | </ | ||
| + | |||
| + | ==== Move datafiles between filesystems ==== | ||
| + | Also used to rename a datafile or tempfile using the traditional method | ||
| + | * [[https:// | ||
| + | * [[https:// | ||
| + | * [[https:// | ||
| + | === Check the status of the datafiles === | ||
| + | < | ||
| + | set lines 1000 pages 100 | ||
| + | col file_name for a70 | ||
| + | select file_id, file_name, status, online_status from dba_data_files where tablespace_name = ' | ||
| + | </ | ||
| + | or | ||
| + | === Check the status of the tempfiles === | ||
| + | < | ||
| + | set lines 1000 pages 100 | ||
| + | col file_name for a70 | ||
| + | select v.file#, t.file_name, | ||
| + | </ | ||
| + | === Set the relevant datafile offline === | ||
| + | < | ||
| + | alter database datafile 125 offline; | ||
| + | alter database datafile 126 offline; | ||
| + | alter database datafile 127 offline; | ||
| + | </ | ||
| + | === Rename the files on the filesystem using o/s commands === | ||
| + | This has to be done before the alter otherwise the alter will fail (it checks that the destination file exists) | ||
| + | < | ||
| + | alter tablespace ts_thaler_data_offline; | ||
| + | |||
| + | host mv '/ | ||
| + | host mv '/ | ||
| + | host mv '/ | ||
| + | </ | ||
| + | === Run the relevant alter commands to inform the control files of the new locations === | ||
| + | < | ||
| + | alter database move datafile '/ | ||
| + | alter database move datafile '/ | ||
| + | alter database move datafile '/ | ||
| + | </ | ||
| + | |||
| + | === Recover the datafiles === | ||
| + | This step is skipped when renaming tempfiles | ||
| + | < | ||
| + | recover datafile 125; | ||
| + | recover datafile 126; | ||
| + | recover datafile 127; | ||
| + | </ | ||
| + | === Set the relevant datafile online === | ||
| + | < | ||
| + | alter database datafile 125 online; | ||
| + | alter database datafile 126 online; | ||
| + | alter database datafile 127 online; | ||
| + | </ | ||
| + | === Rename a datafile on 12c and higher === | ||
| + | If you're lucky enough to be on 12c or higher it's one command - no need to offline the datafile! | ||
| + | < | ||
| + | SQL> alter database move datafile '/ | ||
| + | |||
| + | Database altered. | ||
| + | </ | ||
| + | |||
| + | It seems this method cannot be used for tempfiles - even though the error message suggests the database is aware of it! Have to use the traditional method above. | ||
| + | < | ||
| + | SQL> alter database move tempfile '/ | ||
| + | alter database move tempfile '/ | ||
| + | * | ||
| + | ERROR at line 1: | ||
| + | ORA-00905: missing keyword | ||
| + | |||
| + | |||
| + | SQL> alter database move datafile '/ | ||
| + | alter database move datafile '/ | ||
| + | * | ||
| + | ERROR at line 1: | ||
| + | ORA-01516: nonexistent log file, data file, or temporary file | ||
| + | "/ | ||
| + | |||
| + | </ | ||
| + | |||
| + | |||
| + | ==== Pass parameter/ | ||
| Trick? Just use ARGV to pass them in... | Trick? Just use ARGV to pass them in... | ||
| - | < | + | < |
| + | FREQ=$(perl -e ' | ||
| + | </ | ||
| or | or | ||
| - | < | + | < |
| + | DUMP_DATE=`perl -MTime:: | ||
| + | </ | ||
| or export the variable from shell and access it via the ENV hash | or export the variable from shell and access it via the ENV hash | ||
| - | < | + | < |
| + | export db=ENDP1 | ||
| + | perl -p -i -e ' | ||
| + | </ | ||
| - | =====Return epoch seconds in Perl===== | + | ==== Return epoch seconds in Perl ==== |
| - | < | + | < |
| + | perl -e 'print time();' | ||
| + | </ | ||
| or to convert a specific day of the year to epoch seconds | or to convert a specific day of the year to epoch seconds | ||
| - | < | + | < |
| - | or for a specific date (careful though - month is 0-11 not 1-12!...<br /> | + | use POSIX; |
| - | < | + | my $epochsecs = mktime(0, |
| - | or more long-winded but delimited by anything...<br /> | + | </ |
| + | or for a specific date (careful though - month is 0-11 not 1-12!... | ||
| + | < | ||
| + | use Time:: | ||
| + | # SS MM HH DD MM YYYY | ||
| + | my $epochsecs = timelocal (30, | ||
| + | </ | ||
| + | or more long-winded but delimited by anything... | ||
Supply the date/time in any delimited format. E.g. YYYY-MM-DD: | Supply the date/time in any delimited format. E.g. YYYY-MM-DD: | ||
| - | < | + | < |
| + | #!/usr/ | ||
| + | use Time:: | ||
| - | =====Return epoch seconds in DOS/ | + | sub date2epoch { |
| + | my($s) | ||
| + | my($year, $month, $day, $hour, $minute, $second); | ||
| + | |||
| + | if($s =~ m{^\s*(\d{1, | ||
| + | | ||
| + | $year = $1; $month = $2; $day = $3; | ||
| + | $hour = $4; $minute = $5; $second = $6; | ||
| + | $hour |= 0; $minute |= 0; $second |= 0; # defaults. | ||
| + | $year = ($year< | ||
| + | return timelocal($second, | ||
| + | } | ||
| + | return -1; | ||
| + | }</ | ||
| + | |||
| + | ==== Return epoch seconds in DOS/ | ||
| Paste this code into epoch.vbs | Paste this code into epoch.vbs | ||
| - | < | + | < |
| + | function date2epoch(p_date) | ||
| + | date2epoch = DateDiff(" | ||
| + | end function | ||
| + | |||
| + | Wscript.Echo date2epoch(Now()) | ||
| + | </ | ||
| and call from a DOS box like this | and call from a DOS box like this | ||
| - | < | + | < |
| + | cscript //nologo epoch.vbs | ||
| + | </ | ||
| - | =====Return date from epoch seconds in Perl===== | + | ==== Return date from epoch seconds in Perl ==== |
| - | < | + | < |
| - | =====Return epoch seconds from julian day number in Perl===== | + | perl -e 'print scalar (localtime (1243269270))' |
| - | < | + | </ |
| - | =====Extract specific pieces of a date from an epoch timestamp in Perl===== | + | ==== Return epoch seconds from julian day number in Perl ==== |
| - | < | + | < |
| + | use POSIX; | ||
| + | my ($year, $jday) = (2012, | ||
| + | my $epochsecs = mktime(0, | ||
| + | </ | ||
| + | ==== Extract specific pieces of a date from an epoch timestamp in Perl ==== | ||
| + | < | ||
| + | use POSIX; | ||
| + | # return localtime into an array | ||
| + | my @tm = localtime $epochsecs; | ||
| - | =====Return epoch seconds in Shell (linux)===== | + | my $yyyymmdd |
| - | < | + | print $yyyymmdd." |
| + | "; | ||
| + | </ | ||
| - | =====Return | + | ==== Return epoch seconds in Shell (linux) ==== |
| - | < | + | < |
| + | date -d " | ||
| + | </ | ||
| - | =====Return epoch seconds in PL/SQL===== | + | ==== Return date from epoch seconds in Shell (linux) ==== |
| + | < | ||
| + | date -d @1396542870 | ||
| + | </ | ||
| + | |||
| + | ==== Return epoch seconds in PL/SQL ==== | ||
| You cannot just use sysdate as that is timezone sensitive... (epoch seconds are in UTC/GMT) | You cannot just use sysdate as that is timezone sensitive... (epoch seconds are in UTC/GMT) | ||
| - | < | + | < |
| + | select round((cast(sys_extract_utc(systimestamp) as date) - to_date(' | ||
| + | or | ||
| + | select round((vd.created - to_date(' | ||
| + | </ | ||
| How long has this instance been up (in minutes)? | How long has this instance been up (in minutes)? | ||
| - | < | + | < |
| + | select round((cast(sys_extract_utc(systimestamp) as date) - startup_time)*24*60) up_mins from v$instance; | ||
| + | </ | ||
| - | =====Return date from epoch seconds in SQL===== | + | ==== Return date from epoch seconds in SQL ==== |
| - | < | + | < |
| + | select to_char(to_date(' | ||
| + | </ | ||
| - | =====A database overview using the sys.plato package===== | + | ==== A database overview using the sys.plato package ==== |
| + | < | ||
| exec plato.help; | exec plato.help; | ||
| + | </ | ||
| or | or | ||
| + | < | ||
| exec plato.complete(' | exec plato.complete(' | ||
| + | </ | ||
| + | |||
| + | ==== List datafiles for a tablespace ==== | ||
| + | < | ||
| + | select * | ||
| + | from | ||
| + | where 1=1 | ||
| + | and tablespace_name ='& | ||
| + | </ | ||
| + | ==== Show graph of available and free space in tablespaces ==== | ||
| + | < | ||
| + | -- ============================================================================= | ||
| + | -- File Name : http:// | ||
| + | -- Author | ||
| + | -- Description | ||
| + | -- Requirements : Access to the DBA views. | ||
| + | -- Call Syntax | ||
| + | -- Last Modified: 13-OCT-2012 - Created. Based on ts_full.sql | ||
| + | -- | ||
| + | -- Modification History | ||
| + | -- ==================== | ||
| + | -- When Who What | ||
| + | -- ========= ================= ================================================= | ||
| + | -- 13-NOV-13 Stuart Barkley | ||
| + | -- ============================================================================= | ||
| + | SET PAGESIZE 140 lines 180 | ||
| + | COLUMN used_pct FORMAT A11 | ||
| + | COLUMN max_used_pct FORMAT A11 | ||
| + | |||
| + | SELECT tablespace_name | ||
| + | , size_mb | ||
| + | , free_mb | ||
| + | , TRUNC((free_mb/ | ||
| + | , RPAD(' '|| RPAD(' | ||
| + | , max_size_mb | ||
| + | , max_free_mb | ||
| + | , TRUNC((max_free_mb/ | ||
| + | , RPAD(' '|| RPAD(' | ||
| + | from ( | ||
| + | | ||
| + | , | ||
| + | , | ||
| + | , | ||
| + | , | ||
| + | | ||
| + | select tablespace_name | ||
| + | , trunc(sum(bytes)/ | ||
| + | from | ||
| + | group by tablespace_name | ||
| + | ) a | ||
| + | , | ||
| + | select tablespace_name | ||
| + | , trunc(sum(bytes)/ | ||
| + | , trunc(sum(greatest(bytes, | ||
| + | from | ||
| + | group by tablespace_name | ||
| + | ) b | ||
| + | , | ||
| + | | ||
| + | ) | ||
| + | order by tablespace_name; | ||
| - | =====List datafiles for a tablespace===== | + | set lines 80 |
| - | < | + | </ |
| - | =====Show graph of available and free space in tablespaces===== | + | |
| - | < | + | |
| - | =====Another method for calculating free space in tablespace | + | ==== Another method for calculating free space in tablespace |
| - | < | + | < |
| - | =====Work out maximum possible size of a datafile===== | + | select df.tablespace_name " |
| - | < | + | , totalusedspace "Used MB" |
| + | , (df.totalspace - tu.totalusedspace) "Free MB" | ||
| + | , df.totalspace "Total MB" | ||
| + | , round(100 * ( (df.totalspace - tu.totalusedspace)/ | ||
| + | from ( | ||
| + | | ||
| + | , round(sum(bytes) / 1048576) totalspace | ||
| + | | ||
| + | | ||
| + | ) df | ||
| + | , ( | ||
| + | | ||
| + | , | ||
| + | | ||
| + | | ||
| + | ) tu | ||
| + | where df.tablespace_name = tu.tablespace_name; | ||
| + | </ | ||
| + | ==== Work out maximum possible size of a datafile ==== | ||
| + | < | ||
| + | select round( (4*1024*1024*value) / (1024*1024*1024) ,2) maxfilesizeingigabytes from v$parameter where name=' | ||
| + | </ | ||
| or, as tablespaces can have block sizes differing from that of the init.ora parameter... | or, as tablespaces can have block sizes differing from that of the init.ora parameter... | ||
| - | < | + | < |
| + | select round( (4*1024*1024*block_size) / (1024*1024*1024) ,2) maxfilesizeingigabytes from dba_tablespaces where tablespace_name='& | ||
| + | </ | ||
| - | =====Show size and maxsize at datafile level===== | + | ==== Show size and maxsize at datafile level ==== |
| datafiles | datafiles | ||
| - | < | + | < |
| + | set pages 100 lines 200 | ||
| + | col file_name for a60 | ||
| + | select sum(bytes)/ | ||
| + | , sum(maxbytes)/ | ||
| + | , file_name | ||
| + | from | ||
| + | group by file_name | ||
| + | / | ||
| + | </ | ||
| tempfiles | tempfiles | ||
| - | < | + | < |
| + | set pages 100 lines 200 | ||
| + | col file_name for a60 | ||
| + | select sum(bytes)/ | ||
| + | , sum(maxbytes)/ | ||
| + | , file_name | ||
| + | from | ||
| + | group by file_name | ||
| + | / | ||
| + | </ | ||
| - | =====Resize the online redo logfiles===== | + | ==== Resize the online redo logfiles ==== |
| - | Found a more concise, smarter way of doing it [[http:// | + | Found a more concise, smarter way of doing it [[http:// |
| - | Switching logs too often? Redolog files too small?<br /> | + | Switching logs too often? Redolog files too small? |
| - | * Show current log groups | + | * Show current log groups |
| - | < | + | < |
| - | * Create additional (bigger) groups | + | set lines 1000 |
| - | < | + | col group_member for a60 |
| + | select l.group# | ||
| + | , (bytes/ | ||
| + | , l.status | ||
| + | , f.member | ||
| + | , l.archived | ||
| + | , f.type | ||
| + | from | ||
| + | , v$logfile f | ||
| + | where l.group# = f.group# | ||
| + | order by l.group# | ||
| + | , f.member | ||
| + | / | ||
| + | </ | ||
| + | * Create additional (bigger) groups | ||
| + | < | ||
| + | define SID=TIBQFW | ||
| + | alter database add logfile group 6 '/ | ||
| + | alter database add logfile group 7 '/ | ||
| + | alter database add logfile group 8 '/ | ||
| + | alter database add logfile group 9 '/ | ||
| + | alter database add logfile group 10 '/ | ||
| + | </ | ||
| or if you use multiple members... | or if you use multiple members... | ||
| - | < | + | < |
| - | * Switch archivelogs until the new ones are current and the old ones are inactive | + | alter database add logfile group 6 ('/ |
| - | < | + | alter database add logfile group 7 ('/ |
| + | alter database add logfile group 8 ('/ | ||
| + | </ | ||
| + | * Switch archivelogs until the new ones are current and the old ones are inactive | ||
| + | < | ||
| + | alter system switch logfile; | ||
| + | </ | ||
| If the old logs are not yet inactive, checkpoint the database | If the old logs are not yet inactive, checkpoint the database | ||
| - | < | + | < |
| - | * Drop the old groups | + | alter system checkpoint; |
| - | < | + | </ |
| + | * Drop the old groups | ||
| + | < | ||
| + | alter database drop logfile group 1; | ||
| + | alter database drop logfile group 2; | ||
| + | alter database drop logfile group 3; | ||
| + | alter database drop logfile group 4; | ||
| + | alter database drop logfile group 5; | ||
| + | </ | ||
| - | =====Increase the existing size of a datafile===== | + | ==== Increase the existing size of a datafile ==== |
| - | < | + | < |
| + | alter database datafile '& | ||
| + | </ | ||
| - | =====Increase the max size of a tempfile===== | + | ==== Increase the max size of a tempfile ==== |
| - | < | + | < |
| - | =====Find out what processes are using the temp tablespace===== | + | alter database tempfile '/ |
| + | </ | ||
| + | ==== Find out what processes are using the temp tablespace ==== | ||
| Temporary tablespace is used for sorting query results. Find them with this. | Temporary tablespace is used for sorting query results. Find them with this. | ||
| - | < | + | < |
| + | SELECT b.tablespace, | ||
| + | a.username, | ||
| + | FROM v$session a, | ||
| + | WHERE a.saddr = b.session_addr; | ||
| + | </ | ||
| - | =====Increase the maximum size of a datafile===== | + | ==== Increase the maximum size of a datafile ==== |
| Although this can be set lower than existing size, try not to. It makes the reports look weird! | Although this can be set lower than existing size, try not to. It makes the reports look weird! | ||
| - | < | + | < |
| + | alter database | ||
| + | datafile '& | ||
| + | autoextend on | ||
| + | maxsize & | ||
| + | </ | ||
| - | =====Find schemas (users) with objects created outside their default tablespace===== | + | ==== Find schemas (users) with objects created outside their default tablespace ==== |
| Indexes could be a notable exception if they are created in an index tablespace | Indexes could be a notable exception if they are created in an index tablespace | ||
| - | < | + | < |
| + | set lines 200 | ||
| + | col owner for a20 | ||
| + | col segment_name | ||
| + | col tablespace_name for a32 | ||
| + | select owner | ||
| + | , segment_name | ||
| + | , tablespace_name | ||
| + | from | ||
| + | where exists (select username from dba_users where username = owner) | ||
| + | and tablespace_name != (select default_tablespace from dba_users where username = owner) | ||
| + | and owner = '& | ||
| + | / | ||
| + | </ | ||
| - | =====Add a datafile to a tablespace===== | + | ==== Add a datafile to a tablespace ==== |
| ' | ' | ||
| - | < | + | < |
| + | alter tablepace & | ||
| + | add datafile '& | ||
| + | size 100M | ||
| + | autoextend on | ||
| + | next 100M | ||
| + | maxsize unlimited | ||
| + | </ | ||
| - | =====List all datafiles with their size===== | + | ==== List all datafiles with their size ==== |
| - | < | + | < |
| - | =====Database size (How big is my database?)===== | + | set pages 1000 lines 2000 |
| - | There are different interpretations of what constitutes the size of a database...<br /> | + | col filetype for a15 |
| + | col name for a60 | ||
| + | break on report | ||
| + | compute sum of gig on report | ||
| + | |||
| + | select filetype | ||
| + | , name | ||
| + | , gig | ||
| + | from ( | ||
| + | | ||
| + | , | ||
| + | , | ||
| + | | ||
| + | union all | ||
| + | | ||
| + | , | ||
| + | , | ||
| + | | ||
| + | union all | ||
| + | | ||
| + | , | ||
| + | , | ||
| + | | ||
| + | , | ||
| + | | ||
| + | | ||
| + | ) used | ||
| + | , ( | ||
| + | | ||
| + | | ||
| + | ) free | ||
| + | </ | ||
| + | ==== Database size (How big is my database?) ==== | ||
| + | There are different interpretations of what constitutes the size of a database... | ||
| Is it the space allocated to the datafiles or only the space taken up by the data? Are tempfiles included? Are redo logs included? etc... | Is it the space allocated to the datafiles or only the space taken up by the data? Are tempfiles included? Are redo logs included? etc... | ||
| - | < | + | < |
| + | set lines 132 | ||
| + | col dbname for a9 heading " | ||
| + | col dbsize for a15 heading " | ||
| + | col dbused for a15 heading "Used space" | ||
| + | col dbfree for a15 heading "Free space" | ||
| + | with used_space as | ||
| + | ( | ||
| + | select | ||
| + | from v$datafile | ||
| + | union all | ||
| + | select | ||
| + | from v$tempfile | ||
| + | union all | ||
| + | select | ||
| + | from v$log | ||
| + | ) | ||
| + | , free_space as | ||
| + | ( | ||
| + | select sum(bytes) sum_bytes | ||
| + | from dba_free_space | ||
| + | ) | ||
| + | select vd.name | ||
| + | , round(sum(used_space.bytes)/ | ||
| + | , round(sum(used_space.bytes)/ | ||
| + | , round(free_space.sum_bytes/ | ||
| + | from | ||
| + | , used_space | ||
| + | , v$database vd | ||
| + | group by vd.name | ||
| + | , free_space.sum_bytes | ||
| + | / | ||
| + | </ | ||
| - | =====Drop datafile from temp tablespace===== | + | ==== Drop datafile from temp tablespace ==== |
| - | Database is completely stuck because the temporary tablespace has grown to fill all available room.<br /> | + | Database is completely stuck because the temporary tablespace has grown to fill all available room. |
| - | Look around for at least some log files to delete in order to get connected (audit files, sqlnet logs, listener logs)<br /> | + | Look around for at least some log files to delete in order to get connected (audit files, sqlnet logs, listener logs) |
| Once connected, change the default temporary tablespace to a small one so we can remove the original. | Once connected, change the default temporary tablespace to a small one so we can remove the original. | ||
| - | < | + | < |
| - | < | + | create temporary tablespace temp2 tempfile '/ |
| + | </ | ||
| + | < | ||
| + | alter database default temporary tablespace temp2; | ||
| + | </ | ||
I found trying to drop the datafile like this didn't work, just hung there (probably active transactions)... | I found trying to drop the datafile like this didn't work, just hung there (probably active transactions)... | ||
| - | < | + | < |
| + | drop tablespace temp including contents and datafiles; | ||
| + | </ | ||
| So did it like this... | So did it like this... | ||
| - | < | + | < |
| + | alter database tempfile '/ | ||
| + | |||
| + | Database altered. | ||
| + | |||
| + | drop tablespace temp including contents and datafiles; | ||
| + | |||
| + | Tablespace dropped. | ||
| + | </ | ||
| ... an alternative would be to find the sessions using the temp space and kill them... | ... an alternative would be to find the sessions using the temp space and kill them... | ||
| - | < | + | < |
| + | select sid, serial#, status from v$session where serial# in (select session_num from v$sort_usage); | ||
| + | </ | ||
| Then to be neat and tidy, rename the temp2 tablespace back to temp. | Then to be neat and tidy, rename the temp2 tablespace back to temp. | ||
| - | < | + | < |
| + | create temporary tablespace temp tempfile '/ | ||
| + | alter database default temporary tablespace temp; | ||
| + | drop tablespace temp2 including contents and datafiles; | ||
| + | </ | ||
| Although the last statement will not work until any transactions using it have been rolled back or committed. | Although the last statement will not work until any transactions using it have been rolled back or committed. | ||
| - | =====List files that are in hot-backup mode===== | + | ==== List files that are in hot-backup mode ==== |
| + | < | ||
| set lines 100 | set lines 100 | ||
| set pages 100 | set pages 100 | ||
| col name format a60 | col name format a60 | ||
| - | + | </ | |
| + | |||
| + | < | ||
| | | ||
| , | , | ||
| Line 352: | Line 1678: | ||
| | | ||
| | | ||
| + | </ | ||
| - | =====Waiting for snapshot control file enqueue===== | + | ==== Waiting for snapshot control file enqueue ==== |
| Script to identify sessions causing RMAN to hang because of ' | Script to identify sessions causing RMAN to hang because of ' | ||
| - | < | + | < |
| + | set lines 2000 | ||
| + | col killer | ||
| + | col program | ||
| + | col module | ||
| + | col action | ||
| + | col logon_time for a20 | ||
| - | =====Info on blocking processes===== | + | select 'alter system disconnect session ''' |
| - | < | + | , username |
| + | , program | ||
| + | , module | ||
| + | , action | ||
| + | , logon_time | ||
| + | from | ||
| + | , v$enqueue_lock l | ||
| + | where l.sid | ||
| + | and l.type = ' | ||
| + | and l.id1 = 0 | ||
| + | and l.id2 = 2 | ||
| + | / | ||
| + | </ | ||
| + | |||
| + | ==== Info on blocking processes ==== | ||
| + | < | ||
| + | select s1.username || '@' || s1.machine | ||
| + | || ' ( SID=' || s1.sid || ' ) is blocking ' | ||
| + | || s2.username || '@' || s2.machine || ' ( SID=' || s2.sid || ' ) ' | ||
| + | from | ||
| + | , v$session s1 | ||
| + | , v$lock l2 | ||
| + | , v$session s2 | ||
| + | where s1.sid | ||
| + | and s2.sid | ||
| + | and l1.block | ||
| + | and l2.request > 0 | ||
| + | and l1.id1 | ||
| + | and l2.id2 | ||
| + | / | ||
| + | </ | ||
| session doing the blocking | session doing the blocking | ||
| - | < | + | < |
| + | select * | ||
| + | from v$lock l1 | ||
| + | where 1=1 | ||
| + | and block = 1 ; | ||
| + | </ | ||
| sessions being blocked | sessions being blocked | ||
| - | < | + | < |
| + | select * | ||
| + | from | ||
| + | where 1=1 | ||
| + | and id2 = 85203 | ||
| + | </ | ||
| info on session doing the blocking | info on session doing the blocking | ||
| - | < | + | < |
| + | select row_wait_obj#, | ||
| + | from | ||
| + | where sid = 234 | ||
| + | |||
| + | select do.object_name | ||
| + | , row_wait_obj# | ||
| + | , row_wait_file# | ||
| + | , row_wait_block# | ||
| + | , row_wait_row# | ||
| + | , dbms_rowid.rowid_create ( 1, ROW_WAIT_OBJ#, | ||
| + | from | ||
| + | , dba_objects do | ||
| + | where sid=234 | ||
| + | and s.ROW_WAIT_OBJ# | ||
| + | </ | ||
| - | =====Show locked objects===== | + | ==== Show locked objects ==== |
| + | < | ||
| set lines 100 | set lines 100 | ||
| set pages 999 | set pages 999 | ||
| Line 401: | Line 1790: | ||
| | | ||
| | | ||
| - | , session_id; | + | ,\tsession_id; |
| + | </ | ||
| - | =====Show which row is locked===== | + | ==== Show which row is locked ==== |
| + | < | ||
| | | ||
| , | , | ||
| Line 415: | Line 1806: | ||
| | | ||
| | | ||
| + | </ | ||
| - | =====Check what is audited on a database===== | + | ==== Check what is audited on a database ==== |
| - | < | + | < |
| - | =====How old is the oldest audit record?===== | + | set pagesize 100 |
| - | < | + | set linesize 256 |
| - | =====Check what objects are being audited===== | + | select audit_option, |
| - | < | + | </ |
| + | ==== How old is the oldest audit record? ==== | ||
| + | < | ||
| + | select extract (day from (systimestamp - min(ntimestamp# | ||
| + | </ | ||
| + | ==== Check what objects are being audited ==== | ||
| + | < | ||
| + | tti col 60 ' | ||
| + | set lines 1000 pages 100 | ||
| + | col alt for a3 head " | ||
| + | col aud for a3 head " | ||
| + | col com for a3 head " | ||
| + | col del for a3 head " | ||
| + | col gra for a3 head " | ||
| + | col ind for a3 head " | ||
| + | col ins for a3 head " | ||
| + | col loc for a3 head " | ||
| + | col ren for a3 head " | ||
| + | col sel for a3 head " | ||
| + | col upd for a3 head " | ||
| + | col ref for a3 head " | ||
| + | col exe for a3 head " | ||
| + | col cre for a3 head " | ||
| + | col rea for a3 head " | ||
| + | col wri for a3 head " | ||
| + | col fbk for a3 head " | ||
| + | \t | ||
| + | select * | ||
| + | from | ||
| + | / | ||
| + | </ | ||
| - | =====Procedure to delete old audit records===== | + | ==== Procedure to delete old audit records ==== |
| - | < | + | < |
| + | select text from all_source where lower(name)=' | ||
| - | =====Job setup to run the audit purge===== | + | TEXT |
| - | < | + | ------------------------------------------------------------------------------------------------ |
| - | =====A list of all SIDs in oratab that should restart after a server boot===== | + | PROCEDURE purge_audit_trail IS |
| + | BEGIN | ||
| + | -- set the last archive timestamp | ||
| + | DBMS_AUDIT_MGMT.SET_LAST_ARCHIVE_TIMESTAMP( | ||
| + | | ||
| + | | ||
| + | | ||
| + | ); | ||
| + | -- purge audit trail till last archive timestamp | ||
| + | DBMS_AUDIT_MGMT.CLEAN_AUDIT_TRAIL( audit_trail_type => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD | ||
| + | , use_last_arch_timestamp => TRUE); | ||
| + | |||
| + | END purge_audit_trail; | ||
| + | </ | ||
| + | |||
| + | ==== Job setup to run the audit purge ==== | ||
| + | < | ||
| + | BEGIN | ||
| + | sys.dbms_scheduler.create_job( | ||
| + | job_name => '" | ||
| + | job_type => ' | ||
| + | job_action => '" | ||
| + | repeat_interval => ' | ||
| + | start_date => to_timestamp_tz(' | ||
| + | job_class => '" | ||
| + | comments => 'purge audit trails until last archive timestamp', | ||
| + | auto_drop => FALSE, | ||
| + | enabled => FALSE | ||
| + | ); | ||
| + | sys.dbms_scheduler.set_attribute( name => '" | ||
| + | sys.dbms_scheduler.set_attribute( name => '" | ||
| + | sys.dbms_scheduler.enable( '" | ||
| + | END; | ||
| + | / | ||
| + | </ | ||
| + | ==== A list of all SIDs in oratab that should restart after a server boot ==== | ||
| Using awk | Using awk | ||
| - | < | + | < |
| + | awk -F: ' | ||
| + | </ | ||
| Using perl | Using perl | ||
| - | < | + | < |
| + | perl -F: -ne 'print " | ||
| + | </ | ||
| + | |||
| + | ==== Move the admin directories to a new location ==== | ||
| + | < | ||
| + | ./all_db_do "alter system set audit_file_dest='/ | ||
| + | |||
| + | ./all_db_do " | ||
| + | |||
| + | for db in `awk -F: 'NF && $1 !~/[#*]/ {print $1}' | ||
| + | do | ||
| + | echo $db | ||
| + | mkdir -p / | ||
| + | echo "/ | ||
| + | mv / | ||
| + | mv / | ||
| + | echo "Moved / | ||
| + | done | ||
| + | |||
| + | ./all_db_do startup | ||
| - | =====Move the admin directories to a new location===== | + | ./all_db_do " |
| - | < | + | </ |
| - | =====Check if the Standby database is up-to-date with the primary database===== | + | ==== Check if the Standby database is up-to-date with the primary database ==== |
| + | < | ||
| | | ||
| , | , | ||
| Line 467: | Line 1948: | ||
| | | ||
| | | ||
| + | </ | ||
| - | =====Check for gaps on the Standby database===== | + | ==== Check for gaps on the Standby database ==== |
| Run this on the standby | Run this on the standby | ||
| - | < | + | < |
| + | select thread#, low_sequence#, | ||
| + | </ | ||
| or run this on the Primary to see the latest generated archive... | or run this on the Primary to see the latest generated archive... | ||
| - | < | + | < |
| + | select thread# | ||
| + | , sequence# "Last sequence generated" | ||
| + | from | ||
| + | where (thread#, first_time) in ( select thread#, max(first_time) | ||
| + | from | ||
| + | group by thread# | ||
| + | ) | ||
| + | order by 1 | ||
| + | / | ||
| + | </ | ||
| and run this on the standby to check for received but not yet applied logs (this could be normal if a lag has been set)... | and run this on the standby to check for received but not yet applied logs (this could be normal if a lag has been set)... | ||
| - | < | + | < |
| + | select arch.thread# | ||
| + | , arch.sequence# | ||
| + | , appl.sequence# | ||
| + | , (arch.sequence# | ||
| + | from ( | ||
| + | | ||
| + | , | ||
| + | | ||
| + | | ||
| + | from | ||
| + | group by thread# | ||
| + | ) | ||
| + | ) arch | ||
| + | , ( | ||
| + | | ||
| + | , | ||
| + | | ||
| + | | ||
| + | from | ||
| + | group by thread# | ||
| + | ) | ||
| + | ) appl | ||
| + | where arch.thread# | ||
| + | order by 1 | ||
| + | / | ||
| + | </ | ||
| Compare the results from Primary and Secondary to detect possible network problems or other errors in shipping. | Compare the results from Primary and Secondary to detect possible network problems or other errors in shipping. | ||
| - | =====Ignore case-sensitive passwords in 11g (deprecated in 12c)===== | + | ==== Ignore case-sensitive passwords in 11g (deprecated in 12c) ==== |
| - | < | + | < |
| + | alter system set SEC_CASE_SENSITIVE_LOGON = false scope=both | ||
| + | </ | ||
| - | =====External table for the Alert log (pre 11g)===== | + | ==== External table for the Alert log (pre 11g) ==== |
| - | Modified to use (the better) scripts found on [[http:// | + | Modified to use (the better) scripts found on [[http:// |
| - | This script | + | This script |
| - | < | + | Ensure the owning schema has the following privileges: |
| + | create any directory | ||
| + | drop any directory | ||
| - | =====Alert log (11g+)===== | + | * create_custom_alertlog.sql |
| - | This reads the log.xml file (which contains the same information as alert.log)<br /> | + | < |
| + | -- ================ | ||
| + | -- to be run as sys | ||
| + | -- ================ | ||
| + | |||
| + | -- --------------------------------------------------- | ||
| + | -- create an external table based on the text alertlog | ||
| + | -- --------------------------------------------------- | ||
| + | column db new_value _db noprint; | ||
| + | column bdump new_value _bdump noprint; | ||
| + | |||
| + | select instance_name db from v$instance; | ||
| + | |||
| + | select value bdump | ||
| + | from | ||
| + | where name = ' | ||
| + | |||
| + | drop | ||
| + | create directory bdump as '&& | ||
| + | |||
| + | grant read,write on directory bdump to public; | ||
| + | |||
| + | drop table monuser.alert_log_disk; | ||
| + | create table monuser.alert_log_disk ( text varchar2(2000) ) | ||
| + | organization external ( | ||
| + | type oracle_loader | ||
| + | default directory bdump | ||
| + | access parameters ( records delimited by newline nologfile nobadfile | ||
| + | fields terminated by "&" | ||
| + | ) | ||
| + | location(' | ||
| + | ) | ||
| + | reject limit unlimited; | ||
| + | |||
| + | |||
| + | -- ------------------------------ | ||
| + | -- create a custom alertlog table | ||
| + | -- ------------------------------ | ||
| + | drop table monuser.alert_log; | ||
| + | create table monuser.alert_log ( | ||
| + | indx | ||
| + | , | ||
| + | , | ||
| + | ) | ||
| + | storage (initial 512k next 512K pctincrease 0); | ||
| + | |||
| + | create index monuser.alert_log_u1 on monuser.alert_log(indx) | ||
| + | storage (initial 512k next 512K pctincrease 0); | ||
| + | |||
| + | create index monuser.alert_log_u2 on monuser.alert_log(message_date) | ||
| + | storage (initial 512k next 512K pctincrease 0); | ||
| + | |||
| + | drop | ||
| + | create sequence monuser.alert_log_s1 minvalue 1 increment by 1; | ||
| + | |||
| + | |||
| + | -- ----------------------------------------------------------------- | ||
| + | -- make this custom table appear the same as the more modern version | ||
| + | -- ----------------------------------------------------------------- | ||
| + | drop view monuser.x$dbgalertext; | ||
| + | create view monuser.x$dbgalertext as | ||
| + | select indx | ||
| + | , message_date | ||
| + | , message_text | ||
| + | from | ||
| + | |||
| + | drop public synonym x$dbgalertext; | ||
| + | create public synonym x$dbgalertext for monuser.x$dbgalertext; | ||
| + | </ | ||
| + | Now, after the two tables are created, the alert_log table can be filled with the following script.\\ | ||
+ | It only loads those records that are greater than the last time it loaded. And it loads the date/time on every line for convenience.\\ | ||
| + | It also helps when the alertlogs get rotated. You still keep the history within an Oracle table.\\ | ||
| + | Finally, it also strips out all the crap that is really not needed to see if you are looking for errors. | ||
| + | * update_custom_alertlog.sql | ||
| + | < | ||
| + | set serveroutput on | ||
| + | alter session set current_schema=monuser; | ||
| + | |||
| + | declare | ||
| + | |||
| + | isdate | ||
| + | start_updating number := 0; | ||
| + | rows_inserted | ||
| + | |||
| + | message_date | ||
| + | max_date | ||
| + | |||
| + | message_text | ||
| + | |||
| + | begin | ||
| + | |||
| + | dbms_output.enable(null); | ||
| + | |||
| + | /* find a starting date */ | ||
| + | select max(message_date) into max_date from alert_log; | ||
| + | |||
| + | if (max_date is null) then | ||
| + | max_date := to_date(' | ||
| + | end if; | ||
| + | |||
| + | for r in ( | ||
| + | select substr(text, | ||
| + | from | ||
| + | where text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like '%Undo Segment%lined%' | ||
| + | and text not like ' | ||
| + | and text not like '%Log actively being archived by another process%' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like ' | ||
| + | and text not like '%END BACKUP%' | ||
| + | ) | ||
| + | loop | ||
| + | |||
| + | isdate | ||
| + | message_text := null; | ||
| + | |||
| + | select count(*) | ||
| + | into | ||
| + | from | ||
| + | where substr(r.message_text, | ||
| + | and r.message_text not like ' | ||
| + | |||
| + | if (isdate = 1) then | ||
| + | select to_date(substr(r.message_text, | ||
| + | into message_date | ||
| + | from dual; | ||
| + | if (message_date > max_date) then | ||
| + | start_updating := 1; | ||
| + | end if; | ||
| + | else | ||
| + | message_text := r.message_text; | ||
| + | end if; | ||
| + | |||
| + | if (message_text is not null) and (start_updating = 1) then | ||
| + | insert into alert_log (indx, message_date, | ||
| + | rows_inserted := rows_inserted+1; | ||
| + | end if; | ||
| + | |||
| + | end loop; | ||
| + | |||
| + | sys.dbms_output.put_line(' | ||
| + | sys.dbms_output.put_line(' | ||
| + | |||
| + | commit; | ||
| + | |||
| + | end; | ||
| + | / | ||
| + | </ | ||
| + | |||
| + | Update the alert_log table periodically from crontab with the alerts generated since the last run | ||
| + | < | ||
| + | # | ||
| + | |||
| + | ORAENV_ASK=NO | ||
| + | export ORACLE_SID=fin | ||
| + | export PATH="/ | ||
| + | . oraenv | ||
| + | |||
| + | sqlplus / as sysdba<< | ||
| + | start / | ||
| + | EOSQL | ||
| + | </ | ||
| + | |||
| + | ==== Alert log (11g+) ==== | ||
| + | This reads the log.xml file (which contains the same information as alert.log) | ||
| Show the messages put in the alert log in the last 24 hours | Show the messages put in the alert log in the last 24 hours | ||
| - | < | + | < |
| - | There is also a fixed table X$DBGDIREXT which returns all file and directory names under [[diagnostic_dest]]/diag directory: | + | select substr(MESSAGE_TEXT, |
| - | < | + | , count(*) cnt |
| - | =====Directory paths available in v$diag_info===== | + | from |
| - | < | + | where 1=1 |
| - | =====Find out the names of the active trace files for the running processes===== | + | and (MESSAGE_TEXT like ' |
| - | < | + | and cast(ORIGINATING_TIMESTAMP as DATE) > sysdate - & |
| + | group by substr(MESSAGE_TEXT, | ||
| + | </ | ||
| + | There is also a fixed table X$DBGDIREXT which returns all file and directory names under diagnostic_dest/ | ||
| + | < | ||
| + | select lpad(' ', | ||
| + | from | ||
| + | where rownum <=20; | ||
| + | </ | ||
| + | ==== Directory paths available in v$diag_info ==== | ||
| + | < | ||
| + | set lines 2000 | ||
| + | col name for a35 | ||
| + | col value for a100 wrap | ||
| + | select * from v$diag_info; | ||
| + | |||
| + | | ||
| + | ---------- ----------------------------------- ---------------------------------------------------------------------------------------------------- ---------- | ||
| + | 1 Diag Enabled | ||
| + | 1 ADR Base / | ||
| + | 1 ADR Home / | ||
| + | 1 Diag Trace / | ||
| + | 1 Diag Alert / | ||
| + | 1 Diag Incident | ||
| + | 1 Diag Cdump / | ||
| + | 1 Health Monitor | ||
| + | 1 Default Trace File / | ||
| + | 1 Active Problem Count 0 0 | ||
| + | 1 Active Incident Count | ||
| + | |||
| + | 11 rows selected. | ||
| + | </ | ||
| + | ==== Find out the names of the active trace files for the running processes ==== | ||
| + | < | ||
| + | select pid, program, tracefile from v$process; | ||
| + | </ | ||
| + | |||
| + | ==== Request was to duplicate a large tablespace from production to test environment ==== | ||
+ | ...but rather than copy the terabytes of data, create the destination tablespace with just the most recent month's data | ||
| + | === See what the source data looks like === | ||
| + | < | ||
| + | col table_owner | ||
| + | col table_name | ||
| + | col partition_name for a20 | ||
| + | set pages 100 lines 1000 | ||
| - | =====Request was to duplicate a large tablespace | + | select table_owner |
| - | ...but rather than copy the terabytes of data, create the destination tablespace with just the most recent months data< | + | , table_name |
| - | ====See what the source data looks like==== | + | , partition_name |
| - | < | + | , partition_position |
| + | , tablespace_name | ||
| + | from dba_tab_partitions | ||
| + | where tablespace_name | ||
| + | order by 1,2,3,4 | ||
| + | / | ||
| + | </ | ||
| This query produces something like this... | This query produces something like this... | ||
| - | < | + | < |
| - | ====Capture the tablespace metadata==== | + | TABLE_OWNER |
| - | < | + | -------------------- ------------------------------ -------------------- ------------------ ------------------------------ |
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ... | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_ARCHIVE | ||
| + | ADS_DBA | ||
| + | ADS_DBA | ||
| + | ADS_DBA | ||
| + | </ | ||
| + | === Capture the tablespace metadata === | ||
| + | < | ||
| + | set long 1000000 | ||
| + | select dbms_metadata.get_ddl ( ' | ||
| + | / | ||
| + | </ | ||
| produces... | produces... | ||
| - | < | + | < |
| - | ====Capture the tablespace objects' | + | CREATE TABLESPACE " |
| - | < | + | '/ |
| - | ====Find the partitions with data from the most recent month==== | + | AUTOEXTEND ON NEXT 104857600 MAXSIZE 32000M, |
| - | < | + | ... |
| + | '/ | ||
| + | AUTOEXTEND ON NEXT 104857600 MAXSIZE 3200M, | ||
| + | LOGGING ONLINE PERMANENT BLOCKSIZE 8192 | ||
| + | EXTENT MANAGEMENT LOCAL AUTOALLOCATE DEFAULT | ||
| + | | ||
| + | </ | ||
| + | === Capture the tablespace objects' | ||
| + | < | ||
| + | cat<< | ||
| + | userid='/ | ||
| + | dumpfile=adst_ads_archive_metadata.dmp | ||
| + | logfile=expdp_adst_ads_archive_metadata.log | ||
| + | tablespaces=ADS_ARCHIVE | ||
| + | parallel=20 | ||
| + | content=metadata_only | ||
| + | exclude=statistics | ||
| + | EOCAT | ||
| + | |||
| + | expdp parfile=expdp_adst_ads_archive_metadata.par | ||
| + | </ | ||
| + | === Find the partitions with data from the most recent month === | ||
| + | < | ||
| + | cat << | ||
| + | set termo off echo off lines 1000 feedb off headi off newpa none trims on | ||
| + | spool / | ||
| + | select table_owner||' | ||
| + | from | ||
| + | where tablespace_name = ' | ||
| + | and partition_name like ' | ||
| + | / | ||
| + | spool off | ||
| + | |||
| + | echo "@/ | ||
| + | EOCAT | ||
| + | </ | ||
| produces... | produces... | ||
| - | < | + | < |
| - | ====Export the data in these partitions==== | + | ADS_ARCHIVE.FAH_DAY_CBA_BGAAP_ARC: |
| - | < | + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: |
| - | ====Drop the old tablespace==== | + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: |
| - | < | + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: |
| - | ====Recreate the tablepspace using the metadata capture above==== | + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: |
| - | < | + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: |
| - | ====Import the tablespace metadata==== | + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: |
| - | < | + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: |
| - | ====Import the last month partition data==== | + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: |
| - | < | + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: |
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ... | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_BGAAP_ARC: | ||
| + | ADS_ARCHIVE.BIL_MTH_INV_CTR_ARC: | ||
| + | ADS_ARCHIVE.BIL_DAY_INV_CTR_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: | ||
| + | ADS_ARCHIVE.FAH_DAY_MVT_IFRS_ARC: | ||
| + | </ | ||
| + | === Export the data in these partitions === | ||
| + | < | ||
| + | cat<<EOCAT > | ||
| + | userid=' | ||
| + | dumpfile=adst_ads_archive_Y2017_%U.dmp | ||
| + | logfile=expdp_adst_ads_archive_Y2017.log | ||
| + | EOCAT | ||
| + | |||
| + | cat / | ||
| + | |||
| + | cat<< | ||
| + | parallel=20 | ||
| + | exclude=statistics | ||
| + | EOCAT | ||
| + | |||
| + | expdp parfile=expdp_adst_ads_archive_partitions.par | ||
| + | </ | ||
| + | === Drop the old tablespace | ||
| + | < | ||
| + | drop tablespace ads_archive including contents and datafiles | ||
| + | / | ||
| + | </ | ||
+ | === Recreate the tablespace using the metadata capture above === | ||
| + | < | ||
| + | create tablespace " | ||
| + | datafile '/ | ||
| + | , '/ | ||
| + | , '/ | ||
| + | , '/ | ||
| + | , '/ | ||
| + | / | ||
| + | </ | ||
| + | === Import the tablespace metadata === | ||
| + | < | ||
| + | cat<< | ||
| + | userid='/ | ||
| + | dumpfile=adst_ads_archive_metadata.dmp | ||
| + | logfile=impdp_adst_ads_archive_metadata.log | ||
| + | EOCAT | ||
| + | |||
| + | impdp parfile=impdp_adst_ads_archive_metadata.par | ||
| + | |||
| + | select 'alter index '|| owner||' | ||
| + | from | ||
| + | where status = ' | ||
| + | / | ||
| + | </ | ||
| + | === Import the last month partition data === | ||
| + | < | ||
| + | cat<< | ||
| + | userid='/ | ||
| + | dumpfile=adst_ads_archive_Y2017_%U.dmp | ||
| + | logfile=impdp_adst_ads_archive_Y2017.log | ||
| + | table_exists_action=append | ||
| + | parallel=20 | ||
| + | EOCAT | ||
| + | |||
| + | impdp parfile=impdp_adst_ads_archive_partitions.par | ||
| + | </ | ||
| - | =====Write to a TCP/IP socket from PL/SQL===== | + | ==== Write to a TCP/IP socket from PL/SQL ==== |
| - | < | + | Like writing to a pipe to get data out to the shell from PL/SQL |
| + | < | ||
| + | declare | ||
| + | | ||
| + | | ||
| + | | ||
| + | begin | ||
| + | | ||
| + | , | ||
| + | , | ||
| + | ) | ||
| + | ; | ||
| + | | ||
| + | ; | ||
| + | | ||
| + | ; | ||
| + | | ||
| + | ; | ||
| + | exception | ||
| + | when others then | ||
| + | raise_application_error(-20101, | ||
| + | ; | ||
| + | utl_tcp.close_connection(bt_conn) | ||
| + | ; | ||
| + | end | ||
| + | ; | ||
| + | / | ||
| + | </ | ||
| and read from it using netcat in another session | and read from it using netcat in another session | ||
| - | < | + | < |
| + | nc -lk 5000 | ||
| + | </ | ||
| produces | produces | ||
| - | < | + | < |
| - | =====Execute a script as a different user===== | + | Sequence Number 101021 |
| - | Some things have to be done as the user and cannot be done as sys (eg. creating and dropping jobs or create and drop a database link)<br /> | + | </ |
| - | It is possible to login as the user by saving, changing and resetting his password (using identified by values) but using dbms_sys_sql is another option<br /> | + | ==== Drop all database links on an instance using the user procedure method ==== |
| + | < | ||
| + | set feed off head off lines 1000 pages 0 newpa none termo off trims on | ||
| + | spool / | ||
| + | |||
| + | select 'drop public database link "' | ||
| + | from | ||
| + | where owner = ' | ||
| + | / | ||
| + | select ' | ||
| + | from | ||
| + | where owner not in (' | ||
| + | / | ||
| + | select 'exec ' | ||
| + | from | ||
| + | where owner not in (' | ||
| + | / | ||
| + | select 'drop procedure ' | ||
| + | from | ||
| + | where owner not in (' | ||
| + | / | ||
| + | spool off | ||
| + | |||
| + | set termo on lines 80 head on feed on newpa 1 | ||
| + | prompt | ||
| + | </ | ||
| + | |||
| + | ==== Drop all database links on an instance (from shell) using sqldba cursor method ==== | ||
| + | < | ||
| + | # | ||
| + | # ============================================================================== | ||
| + | # Name : drop_database_links.ksh | ||
| + | # Description | ||
| + | # | ||
| + | # Parameters | ||
| + | # | ||
| + | # Example | ||
| + | # | ||
| + | # Modification History | ||
| + | # ==================== | ||
| + | # When Who | ||
| + | # ========= ================= ================================================== | ||
| + | # 09-OCT-19 Stuart Barkley | ||
| + | # ============================================================================== | ||
| + | |||
| + | # -------------------------- | ||
| + | # setup database environment | ||
| + | # -------------------------- | ||
| + | if [[ $# -ne 1 ]]; then | ||
| + | echo " | ||
| + | exit 1 | ||
| + | fi | ||
| + | |||
| + | ORAENV_ASK=NO | ||
| + | export ORACLE_SID=" | ||
| + | . oraenv >/ | ||
| + | if [[ $? -ne 0 ]]; then | ||
| + | echo " | ||
| + | exit 1 | ||
| + | fi | ||
| + | |||
| + | TMPFILE="/ | ||
| + | |||
| + | # ---------------------------------------- | ||
| + | # build procedure to remove database links | ||
| + | # ---------------------------------------- | ||
| + | " | ||
| + | whenever sqlerror continue | ||
| + | set headi off newpa none feedb off verif off trims on | ||
| + | create or replace procedure drop_dblink ( p_schema_name in varchar2 | ||
| + | , p_db_link | ||
| + | ) as | ||
| + | plsql | ||
| + | cur | ||
| + | uid | ||
| + | rc number; | ||
| + | begin | ||
| + | select u.user_id | ||
| + | into uid | ||
| + | from | ||
| + | where u.username = p_schema_name | ||
| + | ; | ||
| + | plsql := 'drop database link "' | ||
| + | cur := sys.dbms_sys_sql.open_cursor; | ||
| + | sys.dbms_sys_sql.parse_as_user ( c => cur | ||
| + | , statement | ||
| + | , language_flag => dbms_sql.native | ||
| + | , userid | ||
| + | ); | ||
| + | rc := sys.dbms_sys_sql.execute (cur); | ||
| + | sys.dbms_sys_sql.close_cursor (cur); | ||
| + | end; | ||
| + | / | ||
| + | |||
| + | spool / | ||
| + | select 'drop public database link ' | ||
| + | from | ||
| + | where owner = ' | ||
| + | / | ||
| + | select 'exec drop_dblink( ''' | ||
| + | from | ||
| + | where owner != ' | ||
| + | / | ||
| + | spool off | ||
| + | rem start / | ||
| + | EOSQL | ||
| + | grep ' | ||
| + | if [[ $? -eq 0 ]]; then | ||
| + | echo " | ||
| + | cat " | ||
| + | rm -rf " | ||
| + | exit 1 | ||
| + | fi | ||
| + | rm -rf " | ||
| + | exit 0 | ||
| + | </ | ||
| + | |||
| + | ==== From a dba or sysdba account, test database links owned by a user to see if they are valid ==== | ||
| + | * borrowed from [[https:// | ||
| + | < | ||
| + | set serveroutput on | ||
| + | begin | ||
| + | dbms_output.enable(null); | ||
| + | for f in (select * | ||
| + | from | ||
| + | order by owner | ||
| + | , db_link | ||
| + | ) | ||
| + | loop | ||
| + | dbms_scheduler.create_job ( job_name | ||
| + | , job_type | ||
| + | , job_action | ||
| + | ||' | ||
| + | ||' | ||
| + | ||' | ||
| + | ||' | ||
| + | ||' | ||
| + | ); | ||
| + | begin | ||
| + | dbms_scheduler.run_job (f.owner||' | ||
| + | exception | ||
| + | when others then | ||
| + | dbms_output.put_line (f.owner||' | ||
| + | end; | ||
| + | dbms_scheduler.drop_job (f.owner||' | ||
| + | end loop; | ||
| + | end; | ||
| + | / | ||
| + | </ | ||
| + | |||
| + | ==== Execute a script as a different user ==== | ||
| + | Some things have to be done as the user and cannot be done as sys (eg. creating and dropping jobs or create and drop a database link) | ||
| + | It is possible to login as the user by saving, changing and resetting his password (using identified by values) but using dbms_sys_sql is another option | ||
| Create a PL/SQL procedure to drop a db link | Create a PL/SQL procedure to drop a db link | ||
| - | < | + | < |
| + | create or replace procedure drop_dblink ( p_schema_name in varchar2 | ||
| + | , p_dblink | ||
| + | ) is | ||
| + | l_plsql | ||
| + | l_cur | ||
| + | l_uid | ||
| + | l_rc number; | ||
| + | begin | ||
| + | select | ||
| + | into l_uid | ||
| + | from dba_users du | ||
| + | where | ||
| + | ; | ||
| + | plsql := 'drop database link "' | ||
| + | l_cur := sys.dbms_sys_sql.open_cursor; | ||
| + | sys.dbms_sys_sql.parse_as_user( | ||
| + | c => l_cur, | ||
| + | statement | ||
| + | language_flag => dbms_sql.native, | ||
| + | userid | ||
| + | ); | ||
| + | l_rc := sys.dbms_sys_sql.execute(l_cur); | ||
| + | sys.dbms_sys_sql.close_cursor(l_cur); | ||
| + | end; | ||
| + | / | ||
| + | </ | ||
| Generate the calls to the procedure with the required parameters | Generate the calls to the procedure with the required parameters | ||
| - | < | + | < |
| + | select 'exec drop_dblink (''' | ||
| + | from | ||
| + | where owner != ' | ||
| + | and db_link like ' | ||
| + | or | ||
| + | / | ||
| + | </ | ||
| and this reveals the lines to be executed | and this reveals the lines to be executed | ||
| - | < | + | < |
| + | exec drop_dblink (' | ||
| + | exec drop_dblink (' | ||
| + | </ | ||
| Drop the procedure when finished with it | Drop the procedure when finished with it | ||
| - | < | + | < |
| + | drop procedure drop_dblink; | ||
| + | </ | ||
| or do it as an anonymous block in one go... | or do it as an anonymous block in one go... | ||
| - | < | + | < |
| - | =====Handling single quote marks inside SQL statements===== | + | declare |
| - | < | + | uid number; |
| + | sqltext varchar2(1000) := 'drop database link "& | ||
| + | myint integer; | ||
| + | begin | ||
| + | select user_id into uid from dba_users where username = '& | ||
| + | myint: | ||
| + | sys.dbms_sys_sql.parse_as_user(myint, | ||
| + | sys.dbms_sys_sql.close_cursor(myint); | ||
| + | end; | ||
| + | / | ||
| + | </ | ||
| + | ==== Handling single quote marks inside SQL statements ==== | ||
| + | < | ||
| + | SQL> SELECT ' | ||
| - | =====Run a script on all databases in /etc/oratab===== | + | NAME |
| - | < | + | ------- |
| - | =====Schedule a cron job to run on the last day of each month===== | + | D' |
| - | < | + | |
| - | =====How to tell if a year is a leap year or not - using cal instead of reinventing the wheel===== | + | Alternatively, |
| - | Test: Why is Julian day 0, 17th November 1858?<br /> | + | |
| - | Pretty comprehensive answer: [[http:// | + | SQL> SELECT q' |
| + | |||
| + | NAME | ||
| + | ------- | ||
| + | D' | ||
| + | </ | ||
| + | |||
| + | ==== Run a script on all databases in /etc/oratab ==== | ||
| + | < | ||
| + | export ORAENV_ASK=NO | ||
| + | for db in `awk -F: 'NF && $1 !~/[#*]/ {print $1}' | ||
| + | do | ||
| + | echo $db | ||
| + | ORACLE_SID=$db | ||
| + | . oraenv | ||
| + | sqlplus -V | ||
| + | done | ||
| + | </ | ||
| + | ==== Schedule a cron job to run on the last day of each month ==== | ||
| + | < | ||
| + | 0 23 28-31 * * [ $(date -d +1day +%d) -eq 1 ] && su - oracle -c " | ||
| + | </ | ||
| + | ==== How to tell if a year is a leap year or not - using cal instead of reinventing the wheel ==== | ||
| + | Test: Why is Julian day 0, 17th November 1858?\\ | ||
| + | Pretty comprehensive answer: [[http:// | ||
| Also attempts to answer the question why typing 'cal 09 1752' on Unix systems looks so odd! | Also attempts to answer the question why typing 'cal 09 1752' on Unix systems looks so odd! | ||
| - | < | + | < |
| + | function leap { | ||
| + | # This function tells you if the year passed in as a parameter is a leap year or not... | ||
| + | if [[ $(cal 2 ${1:-$(date ' | ||
| + | leap=' | ||
| + | else | ||
| + | leap=' | ||
| + | fi | ||
| + | echo ${leap} | ||
| + | } | ||
| + | </ | ||
| - | =====Reset an expired (and/or locked) user password to the same value===== | + | ==== Clone an Oracle Home ==== |
| - | Can be used to save user passwords for example in a test environment before destroying them with a copy of production.< | + | Cloning an ORACLE_HOME saves much time if this has to be done several times.\\ |
| - | Simple, quick and easy version. Run this then pick and choose the statements you want to run. | + | |
| - | < | + | On source server |
| + | < | ||
| + | cd $ORACLE_HOME | ||
| + | tar -cvf - . | gzip -c > clonehome.tar.gz | ||
| + | </ | ||
| + | Copy the archive to the destination server.\\ | ||
| + | |||
| + | On the destination server | ||
| + | < | ||
| + | mkdir -p <where you want the new ORACLE_HOME to be> | ||
| + | export ORACLE_HOME=< | ||
| + | gunzip -c clonehome.tar.gz | tar -xvf - | ||
| + | </ | ||
| + | * Check $ORACLE_HOME/ | ||
| + | * Check $ORACLE_HOME/ | ||
| + | < | ||
| + | ${ORACLE_HOME}/ | ||
| + | </ | ||
| + | As root\\ | ||
| + | Check logfile for path to oraInventory | ||
| + | < | ||
| + | <path to oraInventory>/ | ||
| + | <path to ORACLE_HOME>/ | ||
| + | </ | ||
| + | * Add $ORACLE_HOME/ | ||
| + | * Run netca to create a listener | ||
| + | |||
| + | |||
| + | ==== Reset an expired (and/or locked) user password to the same value ==== | ||
| + | Can be used to save user passwords for example in a test environment before destroying them with a copy of production. | ||
| + | <code> | ||
| + | set lines 200 pages 200 head off | ||
| + | col user_list for a200 head " | ||
| + | |||
| + | select 'alter user ' | ||
| + | , null, decode( su.password | ||
| + | , null, 'not identified' | ||
| + | , su.password | ||
| + | ) | ||
| + | , decode( su.password | ||
| + | , null, su.spare4 | ||
| + | , su.spare4||';' | ||
| + | ) | ||
| + | | ||
| + | from | ||
| + | , dba_users du | ||
| + | where 1=1 | ||
| + | and su.name = du.username | ||
| + | and du.oracle_maintained = ' | ||
| + | and (account_status | ||
| + | order by su.name | ||
| + | / | ||
| + | </ | ||
| More industrialised version. Create a package. | More industrialised version. Create a package. | ||
| - | < | + | < |
| - | Call it | + | create or replace package dba_utils |
| - | < | + | as |
| + | procedure reset_password ( p_userlist in varchar2 ); | ||
| + | end dba_utils; | ||
| + | / | ||
| - | =====Latch, mutex and beyond===== | ||
| - | A fine example of how to write scripts from SQL*Plus that interact with the shell underneath< | ||
| - | Found [[https:// | ||
| - | < | ||
| - | ======shell====== | + | create or replace package body dba_utils |
| + | as | ||
| + | |||
| + | procedure reset_password ( p_userlist in varchar2 ) | ||
| + | is | ||
| + | cursor c1 ( p_userlist in varchar2 ) is | ||
| + | select username | ||
| + | , regexp_substr ( dbms_metadata.get_ddl(' | ||
| + | from | ||
| + | where 1=1 | ||
| + | and username in upper( p_userlist ) | ||
| + | ; | ||
| + | begin | ||
| + | dbms_output.enable(null); | ||
| + | for r1 in c1 ( p_userlist ) | ||
| + | loop | ||
| + | begin | ||
| + | execute immediate 'alter user ' || r1.username || ' account unlock'; | ||
| + | execute immediate 'alter user ' || r1.username || ' identified by values ' || r1.pwd; | ||
| + | exception | ||
| + | when others then | ||
| + | dbms_output.put_line(' | ||
| + | end; | ||
| + | end loop; | ||
| + | end reset_password; | ||
| + | |||
| + | end dba_utils; | ||
| + | / | ||
| + | </ | ||
| + | Call it | ||
| + | < | ||
| + | set serveroutput on | ||
| + | exec dba_utils.reset_password (' | ||
| + | </ | ||
| + | |||
| + | ===== shell ===== | ||
| [[http:// | [[http:// | ||
| - | ====Count the number of background processes for a particular SID==== | + | === Count the number of background processes for a particular SID === |
| - | < | + | < |
| + | SID=RAVJDE1 | ||
| + | ps -ef|cut -c54-100|awk -v SID=$SID '$0 ~ /' | ||
| + | </ | ||
| or ...this will work with any OS (so long as the process is at the end of the < | or ...this will work with any OS (so long as the process is at the end of the < | ||
| - | < | + | < |
| + | SID=RAVJDE1 | ||
| + | ps -ef|awk -v SID=$SID -F_ '{ $NF ~ /' | ||
| + | </ | ||
| Pick the columns you want in the ps listing | Pick the columns you want in the ps listing | ||
| - | < | + | < |
| - | ====Find Oracle errors in the most recent DataPump log files==== | + | ps -efo user, |
| - | < | + | </ |
| + | === Find Oracle errors in the most recent DataPump log files === | ||
| + | < | ||
| + | 00 09 * * * / | ||
| + | </ | ||
| - | ====Find the 10 largest directories on a particular filesystem==== | + | === Find the 10 largest directories on a particular filesystem === |
| - | < | + | < |
| + | du -ag /oracle | sort -nr | head -n 10 | ||
| + | </ | ||
| - | ====Find the 10 largest files on a particular mount point==== | + | === Find the 10 largest files on a particular mount point === |
| - | < | + | < |
| + | find /home -size +1g -ls | sort -nr +6 | head -10 | ||
| + | </ | ||
| or | or | ||
| - | < | + | < |
| + | find /home -xdev -ls | sort -nr +6 | head -10 | ||
| + | </ | ||
| - | ====Split a file in pieces using sed==== | + | === Split a file in pieces using sed === |
| Using fixed line numbers | Using fixed line numbers | ||
| - | < | + | < |
| + | sed -ne ' | ||
| + | sed -ne ' | ||
| + | </ | ||
| Using line with a /pattern/ on it | Using line with a /pattern/ on it | ||
| - | < | + | < |
| + | sed -e '/ | ||
| + | sed -n `sed -n '/ | ||
| + | </ | ||
| + | |||
| + | === Split a file in pieces using Perl === | ||
| + | < | ||
| + | perl -ne 'print if 1 .. /marker line/' text.dat > | ||
| + | perl -ne 'print if /marker line/ .. eof()' text.dat > | ||
| + | </ | ||
| - | ====Split a file in pieces using Perl==== | + | === Find and display RMAN errors |
| - | < | + | < |
| + | sed -ne '/ | ||
| + | </ | ||
| - | ====Find and display RMAN errors | + | ===== cmd ===== |
| - | < | + | * [[http:// |
| + | ===== Perl ===== | ||
| + | ==== For all files in a directory, replace a keyword (SSIIDD) | ||
| + | < | ||
| + | for i in `ls` | ||
| + | do | ||
| + | perl -p -i -e ' | ||
| + | done | ||
| + | </ | ||
| - | ======cmd====== | + | ==== Parse tnsnames.ora to get just the service names ==== |
| - | *[[http://www.dba-oracle.com/t_scripts_windows_export.htm|Database Export | + | [[http://stackoverflow.com/questions/ |
| - | ======Perl====== | + | < |
| - | =====For all files in a directory, replace a keyword | + | perl -ne 'print if m/^([^#()\W ][a-zA-Z.]*(?:[.][a-zA-Z]*\s?=)?)/' $TNS_ADMIN/ |
| - | < | + | </ |
| - | =====Parse tnsnames.ora | + | ==== and use that to check the listener status |
| - | [[http:// | + | < |
| - | < | + | for svc in `perl -ne 'print if m/^([^#()\W ][a-zA-Z.]*(?:[.][a-zA-Z]*\s? |
| + | tnsping $svc | ||
| + | done | ||
| + | </ | ||
| - | =====and use that to check the listener status===== | ||
| - | < | ||
handy_scripts.1544273360.txt.gz · Last modified: 2018/12/08 12:49 by 0.0.0.0
