
                                         SQL, PL/SQL Tips and Tricks
                                                Plamen Petkov
References:
  Expert Oracle Database Architecture                          Thomas Kyte
  Oracle Documentation                                         ORACLE
  Oracle MetaLink                                              ORACLE
  SQL Cookbook                                                 Anthony Molinaro
  Personal experience                                          Plamen Petkov
                        Agenda
●   Mining alert/logs files
●   ORA-01555: snapshot too old
●   ORA-01002: fetch out of sequence
●   Optimistic Lock and ORA_ROWSCN
●   Selecting from LONG columns
●   Row Generation
●   Different Timezones support
    Mining alert and/or other logs
●   Identify the place of the alert log (see the sketch below)

                 \mining_alert\locate_alert.sql

●   Create external tables
●   Select from alert.log
●   Select from apache access.log
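
locate_alert.sql is not reproduced in the deck; a minimal sketch of what it
presumably queries (pre-11g the alert log is written to BACKGROUND_DUMP_DEST
and is named alert_<instance_name>.log):

select value background_dump_dest
from   v$parameter
where  name = 'background_dump_dest';   -- directory of the alert log

select instance_name from v$instance;   -- completes the file name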
                   Create external tables
●   It is actually a file located on the ORACLE server file system
●   The alert_dir “directory” is an oracle object, mapped to a file system directory
●   Now alert_log appears to us as a normal Oracle table, for reading only,
    containing all lines of the DB alert log

create or replace directory alert_dir as
'D:\Documentation\PL_SQL_Tips_Tricks\mining_alert';

CREATE TABLE alert_log(
  text_line varchar2(255)
)
ORGANIZATION EXTERNAL(
    TYPE ORACLE_LOADER
    DEFAULT DIRECTORY alert_dir
    ACCESS PARAMETERS(
        records delimited by newline
        fields
        REJECT ROWS WITH ALL NULL FIELDS
    )
    LOCATION(
        'alert_gbdb.log'
    )
)
REJECT LIMIT unlimited
;


               mining_alert\create_external.sql
Select from alert.log – uptime report
●   we are looking for the “Starting ORACLE instance” literal;
●   at the previous line is the time of the startup;
●   so, at the line before the previous one is the time of the previous shutdown

                                         Mon May 22 07:51:37 2006
                                         Starting ORACLE instance (normal)
                                         ....................
                                         Mon May 22 08:15:14 2006
                                         ALTER DATABASE DISMOUNT
                                         ..................
                                         Archive process shutdown avoided: 0 active
                                         Mon May 22 08:15:44 2006
                                         Starting ORACLE instance (normal)
                                         ..................
     mining_alert\alert.log_Uptime.sql
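
alert.log_Uptime.sql itself is not listed; a minimal sketch of the idea, using
the alert_log external table with ROWNUM as the read order (as the excerpt
shows, the "line before the previous one" is only approximately the shutdown
time):

select lag2 approx_shutdown_time, lag1 startup_time
from (
  select r, text_line,
         lag(text_line, 1) over (order by r) lag1,   -- previous line: startup time
         lag(text_line, 2) over (order by r) lag2    -- line before that: prior shutdown
  from (select rownum r, text_line from alert_log)
)
where text_line like 'Starting ORACLE instance%';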
    Select from alert.log – ORA Events
●   we are looking for ORA-xxx messages, which are usually errors;
●   at the previous line is the time of the event;
●   the last predicate filters out the additional error messages of each stack

                                 Mon May 22 07:54:23 2006
                                 Errors in file /u01/app/oracle/admin/gbdb/bdump/gbdb_dbw0_22656.trc:
                                 ORA-01157: cannot identify/lock data file 201 - see DBWR trace file
                                 ORA-01110: data file 201: '/u16/app/oracle/oradata/gbdb/temp01.dbf'

mining_alert\alert.log_ORA.sql
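
A sketch of a query matching the bullets above (an assumption, not the exact
script from the deck):

select lag1 event_time, text_line ora_message
from (
  select r, text_line,
         lag(text_line, 1) over (order by r) lag1
  from (select rownum r, text_line from alert_log)
)
where text_line like 'ORA-%'
  and lag1 not like 'ORA-%';   -- the last predicate drops the stacked
                               -- ORA- lines that follow the first one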

                  Additional info on mining into alert.log: http://asktom.oracle.com/~tkyte/alert_arch.html
Select from apache.log – performance issues
●   apache_log is an external table, pointing to the app.access_200607102312 file
●   From each record there we derive the request, the request type and the
    response time per request
●   Aggregating, we see our slowest modules at the 23rd hour were
    cc_deposits and dowithdraw

      mining_alert\apache.log.sql
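
apache.log.sql is not included here; a hedged sketch of the derivation,
assuming a combined-format access log whose last space-separated field is the
response time (all offsets below are assumptions about that format):

select to_char(trunc(req_Time, 'HH24'), 'dd.mm.yy HH24:MI') req_Time_HH,
       req_type, Request, count(1) cnt_Req, max(RT) max_RT
from (
  select to_date(substr(text_line, instr(text_line, '[') + 1, 20),
                 'DD/Mon/YYYY:HH24:MI:SS',
                 'NLS_DATE_LANGUAGE=English') req_Time,
         -- the HTTP verb sits between the first quote and the next space
         substr(text_line, instr(text_line, '"') + 1,
                instr(text_line, ' ', instr(text_line, '"'))
                  - instr(text_line, '"') - 1) req_type,
         -- the URL sits between the first and second space after that quote
         substr(text_line, instr(text_line, ' ', instr(text_line, '"')) + 1,
                instr(text_line, ' ', instr(text_line, '"'), 2)
                  - instr(text_line, ' ', instr(text_line, '"')) - 1) Request,
         -- response time: assumed to be the last field of the record
         to_number(substr(text_line, instr(text_line, ' ', -1) + 1)) RT
  from apache_log
)
group by trunc(req_Time, 'HH24'), req_type, Request
order by max_RT desc;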
      Write to external Data Pump Files
create table apache_log_unload organization external(
  type oracle_datapump
  default directory alert_dir
  location ('apache.log.unload.pump')
)
as select count(1) cnt_Req, max(RT) max_RT,
  to_char(trunc(req_Time, 'HH24'), 'dd.mm.yy HH24:MI') req_Time_HH, req_type, Request
from (
  select req_Time, req_type,
....................

                        \mining_alert\apache.log.datapump.unload.sql

●   Feature of Oracle 10.1 and above
●   Using the previous select we create a data pump format file, containing
    the aggregations from the apache_log table
●   It contains the result set in binary format plus XML meta data
    (not CREATE/INSERT statements)
            mining_alert\APACHE.LOG.UNLOAD.PUMP
Read from external Data Pump Files
●   A usual external table, but of oracle_datapump format; the file was
    created with the statement from the previous slide
●   The new tools IMPDP/EXPDP are designed for VERY HIGH-SPEED movement of
    data between DBs
●   IMPDP/EXPDP benefit from the meta data:
      ● Oracle stores the character set in the file, so IMPDP performs
        character set conversion on the fly when transferring data to another DB
      ● table names etc. can easily be remapped
      ● jobs can be suspended/resumed
      ● they perform parallel operations
      ● ............
   mining_alert\apache.log.datapump.load.sql
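
The load script is summarized here as a sketch; column names and types are
assumed from the unload select on the previous slides:

create table apache_log_load(
  cnt_Req     number,
  max_RT      number,
  req_Time_HH varchar2(14),
  req_type    varchar2(10),
  Request     varchar2(4000)
)
organization external(
  type oracle_datapump
  default directory alert_dir
  location ('apache.log.unload.pump')
);

select * from apache_log_load order by max_RT desc;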
ORA-01555: snapshot too old error

NOTE: ORA-01555 is NOT related to data corruption.
It is an error during READING of data.
The current statement is rolled back.


●   UNDO is too small for the work we do
    (our programs fetch across commits)
●   Our program commits across its own fetches
●   Delayed Block Cleanout
             UNDO is, in fact, too small
       (our programs fetch across commits)
       We will update/commit a relatively big table using a relatively small
       Undo tablespace and will try to read from it at the same time

create undo tablespace undo_small datafile
  'D:\oracle\oradata\orcl\undo_small.dbf' size 2 m
  autoextend off;
alter system set undo_tablespace = undo_small;


create table big_Table_Random as select * from all_objects
  order by dbms_random.random;
alter table big_Table_Random add constraint
  pk_big_Table_Random primary key (object_id) using index
  tablespace users;
exec dbms_stats.gather_table_stats(user, 'BIG_TABLE_RANDOM',
  cascade => true);

                           ORA-01555\scripts\undo_small.sql
                                        A process updates
                                        Big_Table_Random....




 \ORA-01555\scripts\Update_Commit.sql
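
Update_Commit.sql is not reproduced; a plausible sketch of the updating
process (an assumption, not the original script):

begin
  for rec in (select object_id from big_Table_Random) loop
    update big_Table_Random
       set object_name = lower(object_name)
     where object_id = rec.object_id;
    commit;   -- many small transactions: the 2 MB undo gets recycled fast
  end loop;
end;
/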
                                        ...While another process tries
                                        to fetch from it...
                                        ...and at some point in time hits
                                        a block modified and committed
                                        after the time the reading
                                        started (read consistency)



                                        What to do:
                                        1. Increase rollback (undo retention time)
                                        2. Tune the queries




ORA-01555\scripts\random_Fetch.sql
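
random_Fetch.sql, likewise reconstructed as a sketch (dbms_lock.sleep stands
in for slow client processing):

declare
  v_cnt pls_integer := 0;
begin
  for rec in (select * from big_Table_Random order by object_id) loop
    v_cnt := v_cnt + 1;
    if mod(v_cnt, 1000) = 0 then
      dbms_lock.sleep(1);   -- keep the query open while the other session commits
    end if;
  end loop;   -- eventually raises ORA-01555 once the needed undo is overwritten
end;
/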
               Our program commits through fetches
alter database datafile 'D:\oracle\oradata\orcl\undo_small.dbf'
resize 150m;

                                                Even with a bigger Undo...

                                               ...committing too often
                                               while fetching the same
                                               data, at some point in time
                                               we hit a block modified and
                                               committed by ourselves.
                                               As it is modified AFTER the
                                               query has started, oracle
                                               cannot continue due to read
                                               consistency




   ORA-01555\scripts\random_Fetch_Commit.sql
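
random_Fetch_Commit.sql, sketched under the same assumptions: the session
updates and commits the very table it is still fetching from:

begin
  for rec in (select rowid rid from big_Table_Random) loop
    update big_Table_Random
       set object_name = lower(object_name)
     where rowid = rec.rid;
    commit;   -- committing across our own fetch: sooner or later the still
              -- open query needs undo we have just made overwritable
  end loop;
end;
/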
What to DO to avoid ORA-01555
                                                  1. Size Undo properly
                                                  2. Commit when needed
                                                  3. Keep it as simple as possible
                                                  4. Tune the application (queries)




  Performing a mass update with an improperly sized Undo, we may hit
  ORA-30036: unable to extend segment by 8 in undo tablespace 'UNDO_SMALL'
  The good news, however, is that the data is in the same consistent state as before
  the mass update.
  As an alternative, we can split the data by some business criteria and perform
  the update on each distinct part of it.
What is Delayed Block Cleanout
                                     One of the things oracle does during
                                     commit is to clear the transaction info
                                     from the block header.
                                     However, when the modified blocks are
                                     more than 10% of the buffer cache, or
                                     a block is no longer available in the
                                     cache, oracle leaves this action to the
                                     next query touching the block.
                                     This is called Delayed Block Cleanout.
                                     Side effects:
                                     ●   redo generated by a select statement;
                                     ●   dirty blocks => DBWR writes to disk again
                                     Demo setup:
                                     ●   2 MB UNDO
                                     ●   4 MB buffer cache
   ORA-01555\scripts\t_500_dbc.sql
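
t_500_dbc.sql is not listed; a sketch of how the side effect can be observed,
assuming t_500 has just been updated and committed under the small buffer
cache (the 'redo size' session statistic grows after a plain SELECT):

select b.name, a.value
from   v$mystat a, v$statname b
where  a.statistic# = b.statistic#
and    b.name = 'redo size';          -- note the value

select count(*) from t_500;           -- this SELECT performs the cleanout

select b.name, a.value
from   v$mystat a, v$statname b
where  a.statistic# = b.statistic#
and    b.name = 'redo size';          -- the value grew: the SELECT wrote redo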
                        Delayed Block Cleanout and ORA-01555
0. A block (block0, in tablespace USERS) is changed by a transaction TRN0, committed and
not cleaned out. That means its header still points to its transaction entry in the UNDO
tablespace (slot0), holding the Commit SCN = SCN0 given during the commit.
1. A long running transaction begins. SCN1 is the read consistent SCN this transaction
must roll data back to in order to achieve read consistency.
2. The DB is alive – there is a lot of processing. This processing has nothing to do
with block0 but uses a lot of Undo, thus reusing slot0.
Because of the commits, the lowest SCN recorded in UNDO grows bigger than SCN1.
3. Meanwhile, nobody touches our block0... so it is still not cleaned out.
4. Our transaction (SCN1) eventually touches block0. Seeing the pointer in the header,
oracle goes to UNDO to see what to do with this block. The decision tree (from the
diagram):

●   Is Commit SCN0 still found in the UNDO?
    ●   yes, and Commit SCN0 < SCN1: use the block from the buffer cache and clean it out.
    ●   yes, but Commit SCN0 >= SCN1: roll the block back before using it.
    ●   no – SCN0 was cycled out, so we do not know whether it was committed before or
        after SCN1. But SCN0 is less than the lowest SCN recorded in the UNDO, so the
        lowest UNDO SCN is a good upper bound for SCN0:
        ●   lowest UNDO SCN < SCN1: our query started AFTER the commit over block0,
            so it can use the block from the buffer cache. (Its header would be cleaned
            out as well... but in our scenario, step 2, this is not the case.)
        ●   lowest UNDO SCN >= SCN1: ORA-01555: Snapshot too old.

                                          ORA-01555\images\Delayed Cleanout and 1555.odg
                                                 We will use the previously created t_500
                                                 table to produce around 500 dirty
                                                 blocks.
                                                 After update and COMMIT, we open a
                                                 ref cursor – this is the read consistent
                                                 time for this ref cursor.

                                      begin
                                        for i in 1..1000 loop
                                          update t_Small s set s.y = i where s.id = &1;
                                          commit;
                                        end loop;
                                      end;
                                      /
                                      exit;

                                                   Using t_Small we utilize the whole
                                                   undo in 9 simultaneous sessions...

                                                   ...and get ORA-01555 when fetching
                                                   the result set


ORA-01555\scripts\ora_01555_dbc.sql
                                                 Prevention: touch the modified blocks with
                                                 DBMS_STATS.gather_table_stats(...)
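
ora_01555_dbc.sql, sketched under the stated assumptions (update t_500,
commit, open the ref cursor, then fetch after the nine t_Small sessions have
churned the undo; the column names are assumptions):

declare
  v_rc  sys_refcursor;
  v_rec t_500%rowtype;
begin
  update t_500 set object_name = lower(object_name);
  commit;                              -- the ~500 blocks stay dirty: cleanout is delayed
  open v_rc for select * from t_500;   -- the read consistent point of this cursor
  dbms_lock.sleep(60);                 -- let the t_Small sessions recycle the undo
  loop
    fetch v_rc into v_rec;             -- may raise ORA-01555 on a block
    exit when v_rc%notfound;           -- that still needs cleanout
  end loop;
  close v_rc;
end;
/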
         ORA-01002 fetch out of sequence
A PL/SQL procedure which at some point in time ends the transaction for whatever reason:

create or replace procedure upd_Commit(i_Obj_Id in number, i_Obj_Name in varchar2,
                                       i_Count in number) is
begin
  update big_table_random set object_name = lower(i_Obj_Name) where object_id = i_Obj_Id;
  if(i_Count = 101)then
    commit;
    dbms_output.put_line(i_Count || ', ' || i_Obj_Id || ' committed');
  end if;
exception
  when others then
    dbms_output.put_line(i_Count || ', ' || i_Obj_Id || ': ' || i_Obj_Name || ', Exc: ' || sqlerrm);
    rollback;
end upd_Commit;

                           ORA-01002\upd_Commit.prc




                                                                                            A FOR UPDATE for loop
                                                                                            unexpectedly got ORA-01002.

                                                                                            Interesting:
                                                                                            in 9.2.0.7 the effect is missing.

                         ORA-01002\for_loop.pdc
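
for_loop.pdc is not listed; a sketch of the failing pattern (the row limit
mirrors the commit at i = 101 in upd_Commit above):

declare
  i pls_integer := 0;
begin
  for rec in (select object_id, object_name
                from big_table_random
               where rownum <= 200
                 for update) loop
    i := i + 1;
    upd_Commit(rec.object_id, rec.object_name, i);
    -- the COMMIT inside upd_Commit (at i = 101) ends the transaction and
    -- releases the FOR UPDATE locks; the next fetch raises ORA-01002
  end loop;
end;
/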
ORA-01002 – bulk collect workaround

                                                         The cursor is CLOSED before the
                                                         transaction is ended – either by
                                                         commit or rollback.
                                                         This prevents ORA-01002.
                                                         The code in the loop body (too
                                                         complex sometimes) is actually not
                                                         changed.

         The first COMMIT/ROLLBACK ends the transaction, so all
         records selected for update so far are unlocked again.

 ORA-01002\bulk_Collect.sql
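
bulk_Collect.sql, sketched (the collection types and the 200-row limit are
assumptions):

declare
  type t_ids   is table of big_table_random.object_id%type;
  type t_names is table of big_table_random.object_name%type;
  v_ids   t_ids;
  v_names t_names;
begin
  select object_id, object_name
    bulk collect into v_ids, v_names
    from big_table_random
   where rownum <= 200
     for update;
  -- the implicit cursor is already closed here...
  for i in 1..v_ids.count loop
    upd_Commit(v_ids(i), v_names(i), i);   -- ...so its COMMIT cannot break a fetch
  end loop;
end;
/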
        ORA-01002 – autonomous transaction wrapper
Wrapping in an autonomous transaction means the rollback/commit ends a different
transaction, so it is unable to close our for update cursor:

create or replace procedure upd_Commit(i_Obj_Id in number, i_Obj_Name in varchar2,
                                       i_Count in number) is
begin
  update big_table_random_a set object_name = lower(i_Obj_Name) where object_id = i_Obj_Id;
  if(i_Count = 101)then
    commit;
    dbms_output.put_line(i_Count || ', ' || i_Obj_Id || ' committed');
  end if;
exception
  when others then
    dbms_output.put_line(i_Count || ', ' || i_Obj_Id || ': ' || i_Obj_Name || ', Exc: ' || sqlerrm);
    rollback;
end upd_Commit;

create or replace procedure upd_Commit_Autonomous(i_Obj_Id in number, i_Obj_Name in varchar2,
                                                  i_Count in number) is
  pragma autonomous_transaction;
begin
  upd_Commit(i_Obj_Id, i_Obj_Name, i_Count);
  commit;
exception
  when others then
    rollback;
    raise;
end upd_Commit_Autonomous;

   ORA-01002\upd_Commit_Autonomous.prc

Deadlock is possible if it is about the same object.

In both cases we have the prerequisites to leave data inconsistent; it is just bad
practice to control the transaction inside the procedure, it is the initiator's
job to do it.

                                                                       ORA-01002\for_loop.pdc
    Optimistic Lock and ORA_ROWSCN
Optimistic lock (not a DB lock) is a method of preventing LOST UPDATES.
It is an alternative to Pessimistic Locking.
Optimistic locking is especially relevant to web applications:
with a pessimistic lock, the thin client can leave the DB object locked
forever (until the DBA takes care of it).
Here we are about to compare two methods of performing an optimistic lock:


●   Using a special column, maintained by a trigger or by the
    application, to tell us the version of the record.
●   Using the new Oracle 10g feature ORA_ROWSCN
Optimistic Lock using Version Column
●   We create table dept as a copy of scott.dept with an additional
    version column “last_mod”.
●   As it has fractional-second precision, it is quite enough in a
    human environment.
●   The idea is to check whether the row has been modified just before
    we modify it in the DB.
●   A sequence number could be used as well, but:
    ●   it does not give us the additional info of the actual time the
        modification happened;
    ●   we would have an additional sequence to maintain.
 Optimistic Locking\Version Column\dept.tab
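
dept.tab is not reproduced; a sketch of a plausible definition (the exact
datatype of last_mod is an assumption):

create table dept as
select d.deptno, d.dname, d.loc,
       cast(current_timestamp as timestamp) last_mod   -- the version column
from   scott.dept d;

alter table dept add constraint dept_pk primary key(deptno);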
                                   Lost Update Prevention
create or replace package body manage_dep is

------------------------------------------------------------------------
  function derive_Dep_Data(i_Deptno in dept.deptno%type) return Ref_Cursor is
    v_RC Ref_Cursor;
  begin
    open v_RC for select * from dept where deptno = i_Deptno;
    return v_RC;
  end;

-------------------------------------------------------------------------
  procedure modify_Dep(i_deptno in dept.deptno%type, i_dname in dept.dname%type,
    i_loc in dept.loc%type, i_las_mod in dept.last_mod%type) is
  begin
    update dept set dname = i_dname, loc = i_loc, last_mod = CURRENT_TIMESTAMP
    where deptno = i_deptno and last_mod = i_las_mod;

    dbms_output.put_line(sql%rowcount || ' Departments changed!');
  end;
end manage_dep;

           Optimistic Lock\Version Column\manage_dep.pck



                                                                       In a multiuser environment, we have two (or
                                                                       more) clients editing the same department.
                                                                       Both see the same data...

           Optimistic Lock\Version Column\upd_Dept.sql

                                                                                      ...But only the first one succeeds in
                                                                                      her update...

                 Optimistic Lock\Version Column\upd_Dept.sql

...The second updates nothing,
as the row is already updated. It is up
to the business how to describe
this to the unhappy user.

                                                               Optimistic Lock\Version Column\upd_Dept.sql
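
upd_Dept.sql itself is not shown; a sketch of one client's round trip through
the package (sys_refcursor is used here in place of the package's weak
Ref_Cursor type, and the new values are examples):

declare
  v_rc   sys_refcursor;
  v_dept dept%rowtype;
begin
  -- 1. read the row, remembering its version (last_mod)
  v_rc := manage_dep.derive_Dep_Data(10);
  fetch v_rc into v_dept;
  close v_rc;

  -- ...the user edits on screen; another user may save first...

  -- 2. update only if last_mod is still what we read
  manage_dep.modify_Dep(v_dept.deptno, 'ACCOUNTING', 'BOSTON', v_dept.last_mod);
  -- prints '1 Departments changed!' for the winner, '0 ...' for the loser
end;
/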
                Optimistic Lock using ORA_ROWSCN
ORA_ROWSCN is actually a version column, but provided by ORACLE – starting from 10g Release 1.
ORA_ROWSCN is based on the SCN. As we know, that means it is based on the always advancing,
assigned-at-commit-time SCN.
This is still not the pain killer: ORA_ROWSCN is, by default, maintained at block level,
not at row level.
However, we have the power to change this, but only at the time of creating the segment!
                                                                                 In a block we have two rows...

                                                                               ...both maintained with one
                                                                               and the same ORA_ROWSCN
               Optimistic Lock\ORA_ROWSCN\dept_rowscn_block.tab
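
dept_rowscn_block.tab, sketched (only the table name comes from the slide
footer):

create table dept_rowscn_block as select * from scott.dept;

update dept_rowscn_block set dname = dname where deptno = 10;
commit;

select deptno, ora_rowscn from dept_rowscn_block;
-- every row of the block reports the same, new ORA_ROWSCN,
-- although only the DEPTNO = 10 row was actually changed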
                                                 “ROWDEPENDENCIES (row
                                                 dependency tracking) was added to
                                                 the database with Oracle9i in
                                                 support of advanced replication to
                                                 allow for better parallel
                                                 propagation of changes.”
                                                 Starting with Oracle 10g we can
                                                 use it to implement effective
                                                 Optimistic Locking using
                                                 ORA_ROWSCN. This will add 6
                                                 bytes of overhead to each row. So,
                                                 it DOES NOT save space
                                                 compared with the do-it-yourself
                                                 Version Column.
Optimistic Lock\ORA_ROWSCN\dept_rowscn_row.tab
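
dept_rowscn_row.tab, sketched under the same assumptions:

create table dept_rowscn_row rowdependencies
as select * from scott.dept;

update dept_rowscn_row set dname = dname where deptno = 10;
commit;

select deptno, ora_rowscn from dept_rowscn_row;
-- now only the DEPTNO = 10 row carries the new ORA_ROWSCN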
      Converting an SCN to wall clock time
Another benefit of the transparent ORA_ROWSCN column is that we can
convert it to wall clock time using the SCN_TO_TIMESTAMP built-in.

However, there are some limitations:
1. SCN_TO_TIMESTAMP works approximately, +/- 3 seconds.
2. It works only for SCNs from about the last 5 days of database uptime.
          Optimistic Locking\ORA_ROWSCN\scn_to_timestamp.sql
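
The conversion in one line, using the table from the previous slide:

select deptno, ora_rowscn, scn_to_timestamp(ora_rowscn) last_changed
from   dept_rowscn_row;
-- SCNs older than the mapping window raise ORA-08181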
Selecting from LONG columns
                          It is always an issue to identify
                          which view(s) reference some
                          table or another view.

                          And this is not only about
                          views: a lot more
                          dictionary objects use LONG
                          data type columns.
    \LONG\all_views.sql
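
The pain in one line, and the reason a helper is needed: LONG columns cannot
be used in a predicate (the searched-for name is an example):

select view_name
from   all_views
where  text like '%BIG_TABLE_RANDOM%';   -- fails: TEXT is a LONG column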
                  Benefit from dbms_sql.column_value_long
●   The query provided (p_query) must select at most one record.
●   We parse only if we need to.
●   We bind only if we need to.
●   We use dbms_sql.column_value_long to extract a requested portion of the
    selected LONG column.
●   We use invoker rights for this package.

create or replace package long_help authid current_user is
  function substr_of(
    p_query in varchar2,
    p_from  in number,
    p_for   in number,
    p_name1 in varchar2 default NULL,
    p_bind1 in varchar2 default NULL,
    p_name2 in varchar2 default NULL,
    p_bind2 in varchar2 default NULL,
    p_name3 in varchar2 default NULL,
    p_bind3 in varchar2 default NULL,
    p_name4 in varchar2 default NULL,
    p_bind4 in varchar2 default NULL
  )return varchar2;
end long_help;

create or replace package body long_help is
  g_cursor number := dbms_sql.open_cursor;
  g_query  varchar2(32765);

  procedure bind_variable( p_name in varchar2, p_value in varchar2 ) is
  begin
    if ( p_name is not null )then
      dbms_sql.bind_variable( g_cursor, p_name, p_value );
    end if;
  end;

  function substr_of( p_query in varchar2,
    p_from in number,
    p_for  in number,
    p_name1 in varchar2 default NULL, p_bind1 in varchar2 default NULL,
    p_name2 in varchar2 default NULL, p_bind2 in varchar2 default NULL,
    p_name3 in varchar2 default NULL, p_bind3 in varchar2 default NULL,
    p_name4 in varchar2 default NULL, p_bind4 in varchar2 default NULL ) return varchar2 as
    l_buffer     varchar2(4000);
    l_buffer_len number;
  begin
    if ( nvl(p_from,0) <= 0 )then
      raise_application_error(-20002, 'From must be >= 1 (positive numbers)' );
    end if;
    if ( nvl(p_for,0) not between 1 and 4000 )then
      raise_application_error(-20003, 'For must be between 1 and 4000' );
    end if;

    if ( p_query <> g_query or g_query is NULL )then
      if ( upper(trim(nvl(p_query,'x'))) not like 'SELECT%')then
        raise_application_error(-20001, 'This must be a select only' );
      end if;
      dbms_sql.parse( g_cursor, p_query, dbms_sql.native );
      g_query := p_query;
    end if;

    bind_variable( p_name1, p_bind1 );
    bind_variable( p_name2, p_bind2 );
    bind_variable( p_name3, p_bind3 );
    bind_variable( p_name4, p_bind4 );

    dbms_sql.define_column_long(g_cursor, 1);
    if (dbms_sql.execute_and_fetch(g_cursor)>0)then
      dbms_sql.column_value_long(g_cursor, 1, p_for, p_from-1, l_buffer, l_buffer_len);
    end if;
    return l_buffer;
  end substr_of;
end long_help;

                             LONG\long_help.pck
Searching into view definition




          LONG\Search_Longs.sql
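
Search_Longs.sql is not listed; a sketch of the intended use of the helper
(the searched-for table name is an example):

select owner, view_name
from   all_views
where  instr(
         long_help.substr_of(
           'select text from all_views where owner = :o and view_name = :v',
           1, 4000, ':o', owner, ':v', view_name),
         'BIG_TABLE_RANDOM') > 0;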
Searching into TRIGGERS




      LONG\Search_Triggers.sql
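
Search_Triggers.sql, sketched the same way (trigger_body is a LONG too):

select owner, trigger_name
from   all_triggers
where  instr(
         long_help.substr_of(
           'select trigger_body from all_triggers where owner = :o and trigger_name = :t',
           1, 4000, ':o', owner, ':t', trigger_name),
         'BIG_TABLE_RANDOM') > 0;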
Partition's HIGH VALUE




    LONG\Identify_Partition_HV.sql
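
Identify_Partition_HV.sql, sketched (the table name is an example);
high_value in *_tab_partitions is a LONG as well:

select partition_name,
       long_help.substr_of(
         'select high_value from user_tab_partitions where table_name = :t and partition_name = :p',
         1, 4000, ':t', table_name, ':p', partition_name) high_value
from   user_tab_partitions
where  table_name = 'BIG_TABLE_PART';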
Row Generation




   RowGen\row_generate.sql
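
row_generate.sql is not listed; the classic CONNECT BY LEVEL generator is the
usual technique (a sketch):

select level n
from   dual
connect by level <= 10;   -- any number of rows on demand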




    RowGen\Last_N_Years
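
Last_N_Years, sketched with the same generator (the 5 is an example):

select extract(year from sysdate) - level + 1 yr
from   dual
connect by level <= 5;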
                                  Aggregate Statistics...
We want to generate statistics in
BET_LEAGUE_STAT from 01.08 till now.
Modification_Time shows when the
statistics were last generated.

The statistics are grouped per day, so
we need a result set giving us all dates
in that period.
                                        RowGen\generate_statistics.pdc
                    ...W/O losing any modifications
C_PB_BEGINING constant date := to_date('01.08.2006', 'DD.MM.YYYY');
gv_This_Aggregation date;

begin
  select nvl(MIN(to_date(tr.START_TIME, 'MM/DD/YY HH24:MI:SS')), sysdate)
    into gv_This_Aggregation
    from v$transaction tr;

  for rec_Date in (
    select last_aggregated+level-1 for_Date
    from (select nvl(trunc(max(bsc.MODIFICATON_TIME)), C_PB_BEGINING) last_aggregated
          from bet_stat_cust bsc)
    connect by last_aggregated+level-1 <= trunc(gv_This_Aggregation)
    order by for_date desc
  )loop
     GENERATE_LEAGUE_STATS_VBS(rec_Date.for_Date);
  end loop;
end;
/


  ------------------------------------------------------
  --- fills in the league statistics for virtual bets per day
  function GENERATE_LEAGUE_STATS_VBS (i_for_date DATE) return number IS
  BEGIN
    delete adm.bet_league_stat bls where bls.month = i_for_date and bls.type = C_STAT_TYPE_VBS;
    insert into adm.bet_league_stat(
       month, bs_parent_id, bs_league,
       bs_etype, type, bs_stake, bs_payout,
       modificaton_time
     )
    select i_for_date, League.Cat_Parent_Id bs_Parent_ID, League.Cat_Name_En bs_league,
       ett.ogr_name, C_STAT_TYPE_VBS, sum(bc.bet_stake), sum(bc.bet_return), gv_This_Aggregation
    from bet.bet_child bc, bet.bet b, bet.outcome o, bet.gb_event e, bet.event_type_text ett,
         bet.catalog c, bet.catalog League, bet.t_user u
    where bc.bet_settle_time >= i_for_date and bc.bet_settle_time < i_for_date+1
       and b.bet_id = bc.bet_id and u.id = b.bet_customer_id and u.type != 'T'
       and o.o_id = bc.o_id and e.gb_id = o.o_gb_event_id
       and ett.ogr_id(+) = e.gb_ogr_id and ett.lang_id(+) = 'EN'
       and c.cat_id = e.gb_egroup_id and League.cat_id = c.cat_parent_id
    group by League.Cat_Parent_Id, League.Cat_Name_En, ett.ogr_name
     ;
    return sql%rowcount;
  END;


                                               RowGen\generate_statistics.pdc
v$transaction helps us not to lose any
updates started just before the statistics
aggregation began
Different Timezones support




        TZ\allowed_TZ.sql
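
allowed_TZ.sql presumably lists the time zone names Oracle accepts; a sketch:

select tzname, tzabbrev
from   v$timezone_names
where  tzname in ('Europe/Sofia', 'Europe/Berlin');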
c_Timestamp_Local_TZ is a timestamp, converted to the DB timezone.
Only for a session with the same TZ is it actually the same as the
other date columns, because there was no conversion....

C_Timestamp_Local_TZ for Sofia's users, however, contains the actual time in Sofia...

                       ...as well as for those from Berlin
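
A sketch of the effect, assuming a TIMESTAMP WITH LOCAL TIME ZONE column
(table and column names are illustrative):

create table tz_demo(
  c_date               date,
  c_timestamp_local_tz timestamp with local time zone
);

alter session set time_zone = 'Europe/Sofia';
insert into tz_demo values (sysdate, systimestamp);

alter session set time_zone = 'Europe/Berlin';
select * from tz_demo;
-- c_date is shown as stored, while c_timestamp_local_tz
-- is rendered in the Berlin session's local time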
