2012-04-30
Partition wise merge - continued

Continuing the Partition wise merge post: more rows in the target table and a bitmap index on it. The last post ended with a nested loops join strategy. Indexing the access path to tgt at the bottom of the plan may let the execution do less work.

alter session set statistics_level = all;

create bitmap index tgt_cust_bidx on tgt(cust) local;

insert into tgt
select level, level, trunc(sysdate), level
from dual connect by level < 10000;

exec dbms_stats.gather_table_stats(user,'TGT');
exec dbms_stats.gather_table_stats(user,'STG');

merge /*+ index(tgt tgt_cust_bidx) */ into tgt
using (select cust, prd, dt, num from stg) sou
on (tgt.dt=sou.dt and tgt.cust=sou.cust and tgt.prd=sou.prd)
when matched then update set tgt.num=tgt.num+sou.num
when not matched then insert (tgt.cust,tgt.prd,tgt.dt,tgt.num)
                      values (sou.cust,sou.cust,sou.dt,sou.num);

select * from table(dbms_xplan.display_cursor(format=>'allstats last +partition'));

------------------------------------------------------------------------------------------------------------------------------------
| Id  | Operation                                 | Name          | Starts | E-Rows | Pstart| Pstop | A-Rows |   A-Time   | Buffers |
------------------------------------------------------------------------------------------------------------------------------------
|   0 | MERGE STATEMENT                           |               |      1 |        |       |       |      0 |00:00:00.10 |      26 |
|   1 |  MERGE                                    | TGT           |      1 |        |       |       |      0 |00:00:00.10 |      26 |
|   2 |   PX COORDINATOR                          |               |      1 |        |       |       |      4 |00:00:00.10 |      22 |
|   3 |    PX SEND QC (RANDOM)                    | :TQ10000      |      0 |      2 |       |       |      0 |00:00:00.01 |       0 |
|   4 |     VIEW                                  |               |      0 |        |       |       |      0 |00:00:00.01 |       0 |
|   5 |      NESTED LOOPS OUTER                   |               |      0 |      2 |       |       |      0 |00:00:00.01 |       0 |
|   6 |       PX BLOCK ITERATOR                   |               |      0 |      2 |     1 |     4 |      0 |00:00:00.01 |       0 |
|*  7 |        TABLE ACCESS FULL                  | STG           |      0 |      2 |     1 |     4 |      0 |00:00:00.01 |       0 |
|   8 |       PARTITION RANGE ITERATOR            |               |      0 |      1 |   KEY |   KEY |      0 |00:00:00.01 |       0 |
|   9 |        PARTITION HASH ITERATOR            |               |      0 |      1 |   KEY |   KEY |      0 |00:00:00.01 |       0 |
|* 10 |         TABLE ACCESS BY LOCAL INDEX ROWID | TGT           |      0 |      1 |       |       |      0 |00:00:00.01 |       0 |
|  11 |          BITMAP CONVERSION TO ROWIDS      |               |      0 |        |       |       |      0 |00:00:00.01 |       0 |
|* 12 |           BITMAP INDEX SINGLE VALUE       | TGT_CUST_BIDX |      0 |        |       |       |      0 |00:00:00.01 |       0 |
------------------------------------------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------
   7 - access(:Z>=:Z AND :Z<=:Z)
  10 - filter(("TGT"."PRD"="PRD" AND "TGT"."DT"="DT"))
  12 - access("TGT"."CUST"="CUST")

With the +partition format option it was possible to add partition information to the display_cursor output. How about putting more rows into the source table? Next post about that...
2012-04-29
Partition wise merge
Having a subpartitioned target table and a staging table that should be merged into the target, will a partition-wise merge work?
drop table tgt;
drop table stg;

create table tgt(cust int, prd int, dt date, num int)
partition by range (dt)
subpartition by hash (prd)
( partition p1 values less than (to_date(20120101,'yyyymmdd')) subpartitions 4
, partition pmax values less than (maxvalue) subpartitions 4);

insert into tgt values(1,1,to_date(20110101,'yyyymmdd'),1);

create table stg(cust int, prd int, dt date, num int)
partition by hash (prd) partitions 4
parallel 4;

insert into stg
select level, level, trunc(sysdate), level
from dual connect by level < 3;

select * from stg;

1  1  29.04.2012 00:00:00  1
2  2  29.04.2012 00:00:00  2

select * from tgt;

1  1  01.01.2011 00:00:00  1

alter session set statistics_level = all;

merge into tgt
using (select cust, prd, dt, num from stg) sou
on (tgt.dt=sou.dt and tgt.cust=sou.cust and tgt.prd=sou.prd)
when matched then update set tgt.num=tgt.num+sou.num
when not matched then insert (tgt.cust,tgt.prd,tgt.dt,tgt.num)
                      values (sou.cust,sou.cust,sou.dt,sou.num);

So, how did it execute?
select * from table(dbms_xplan.display_cursor(format=>'allstats last'));

----------------------------------------------------------------------------------------------------
| Id  | Operation                      | Name     | Starts | E-Rows | A-Rows |   A-Time   | Buffers |
----------------------------------------------------------------------------------------------------
|   0 | MERGE STATEMENT                |          |      1 |        |      0 |00:00:00.02 |      84 |
|   1 |  MERGE                         | TGT      |      1 |        |      0 |00:00:00.02 |      84 |
|   2 |   PX COORDINATOR               |          |      1 |        |      4 |00:00:00.02 |      24 |
|   3 |    PX SEND QC (RANDOM)         | :TQ10000 |      0 |      4 |      0 |00:00:00.01 |       0 |
|   4 |     VIEW                       |          |      0 |        |      0 |00:00:00.01 |       0 |
|   5 |      NESTED LOOPS OUTER        |          |      0 |      4 |      0 |00:00:00.01 |       0 |
|   6 |       PX BLOCK ITERATOR        |          |      0 |      4 |      0 |00:00:00.01 |       0 |
|*  7 |        TABLE ACCESS FULL       | STG      |      0 |      4 |      0 |00:00:00.01 |       0 |
|   8 |       PARTITION RANGE ITERATOR |          |      0 |      1 |      0 |00:00:00.01 |       0 |
|   9 |        PARTITION HASH ITERATOR |          |      0 |      1 |      0 |00:00:00.01 |       0 |
|* 10 |         TABLE ACCESS FULL      | TGT      |      0 |      1 |      0 |00:00:00.01 |       0 |
----------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------
   7 - access(:Z>=:Z AND :Z<=:Z)
  10 - filter(("TGT"."PRD"="PRD" AND "TGT"."CUST"="CUST" AND "TGT"."DT"="DT"))

Well, that did not tell us anything about partitioning. How about without allstats?
merge into tgt
using (select cust, prd, dt, num from stg) sou
on (tgt.dt=sou.dt and tgt.cust=sou.cust and tgt.prd=sou.prd)
when matched then update set tgt.num=tgt.num+sou.num
when not matched then insert (tgt.cust,tgt.prd,tgt.dt,tgt.num)
                      values (sou.cust,sou.cust,sou.dt,sou.num);

select * from table(dbms_xplan.display_cursor());

---------------------------------------------------------------------------------------------------------------------------------------
| Id  | Operation                      | Name     | Rows  | Bytes | Cost (%CPU)| Time     | Pstart| Pstop |    TQ  |IN-OUT| PQ Distrib |
---------------------------------------------------------------------------------------------------------------------------------------
|   0 | MERGE STATEMENT                |          |       |       |     4 (100)|          |       |       |        |      |            |
|   1 |  MERGE                         | TGT      |       |       |            |          |       |       |        |      |            |
|   2 |   PX COORDINATOR               |          |       |       |            |          |       |       |        |      |            |
|   3 |    PX SEND QC (RANDOM)         | :TQ10000 |    16 |  1728 |     4   (0)| 00:00:01 |       |       |  Q1,00 | P->S | QC (RAND)  |
|   4 |     VIEW                       |          |       |       |            |          |       |       |  Q1,00 | PCWP |            |
|   5 |      NESTED LOOPS OUTER        |          |    16 |  1728 |     4   (0)| 00:00:01 |       |       |  Q1,00 | PCWP |            |
|   6 |       PX BLOCK ITERATOR        |          |    16 |   768 |     2   (0)| 00:00:01 |     1 |     8 |  Q1,00 | PCWC |            |
|*  7 |        TABLE ACCESS FULL       | STG      |    16 |   768 |     2   (0)| 00:00:01 |     1 |     8 |  Q1,00 | PCWP |            |
|   8 |       PARTITION RANGE ITERATOR |          |     1 |    60 |     1   (0)| 00:00:01 |   KEY |   KEY |  Q1,00 | PCWP |            |
|   9 |        PARTITION HASH ITERATOR |          |     1 |    60 |     1   (0)| 00:00:01 |   KEY |   KEY |  Q1,00 | PCWP |            |
|* 10 |         TABLE ACCESS FULL      | TGT      |     1 |    60 |     1   (0)| 00:00:01 |       |       |  Q1,00 | PCWP |            |
---------------------------------------------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------
   7 - access(:Z>=:Z AND :Z<=:Z)
  10 - filter(("TGT"."PRD"="PRD" AND "TGT"."CUST"="CUST" AND "TGT"."DT"="DT"))

This is more informative about partitioning: there you can see Pstart and Pstop. It looks like the staging table partitioning matches the target table subpartitioning. Yet another approach is SQL monitoring.
merge into /*+monitor*/ tgt
using (select cust, prd, dt, num from stg) sou
on (tgt.dt=sou.dt and tgt.cust=sou.cust and tgt.prd=sou.prd)
when matched then update set tgt.num=tgt.num+sou.num
when not matched then insert (tgt.cust,tgt.prd,tgt.dt,tgt.num)
                      values (sou.cust,sou.cust,sou.dt,sou.num);

select dbms_sqltune.report_sql_monitor(sql_id,sid,session_serial#)
from v$sql_monitor m
where sid=userenv('sid')
order by last_refresh_time desc;

And the latest row there tells us:
SQL Monitoring Report

SQL Text
------------------------------
merge into /*+monitor*/ tgt using (select cust, prd, dt, num from stg) sou
on (tgt.dt=sou.dt and tgt.cust=sou.cust and tgt.prd=sou.prd)
when matched then update set tgt.num=tgt.num+sou.num
when not matched then insert (tgt.cust,tgt.prd,tgt.dt,tgt.num) values(sou.cust,sou.cust,sou.dt,sou.num)

Global Information
------------------------------
 Status              :  DONE
 Instance ID         :  1
 Session             :  RAFU (140:5746)
 SQL ID              :  cqakadxwrc3yz
 SQL Execution ID    :  16777221
 Execution Started   :  04/29/2012 00:01:02
 First Refresh Time  :  04/29/2012 00:01:02
 Last Refresh Time   :  04/29/2012 00:01:02
 Duration            :  .004028s
 Module/Action       :  SQL Developer/-
 Service             :  SYS$USERS
 Program             :  SQL Developer

Global Stats
===============================
| Elapsed |  Other   | Buffer |
| Time(s) | Waits(s) |  Gets  |
===============================
|    0.01 |     0.01 |     58 |
===============================

Parallel Execution Details (DOP=4 , Servers Allocated=4)
================================================================================
|      Name      | Type  | Server# | Elapsed |  Other   | Buffer | Wait Events |
|                |       |         | Time(s) | Waits(s) |  Gets  | (sample #)  |
================================================================================
| PX Coordinator | QC    |         |    0.00 |     0.00 |     14 |             |
| p000           | Set 1 |       1 |    0.00 |     0.00 |     16 |             |
| p001           | Set 1 |       2 |    0.00 |     0.00 |      9 |             |
| p002           | Set 1 |       3 |    0.00 |     0.00 |     10 |             |
| p003           | Set 1 |       4 |    0.00 |     0.00 |      9 |             |
================================================================================

SQL Plan Monitoring Details (Plan Hash Value=659647117)
=========================================================================================================================================
| Id |           Operation            |   Name   |  Rows   | Cost |   Time    | Start  | Execs |   Rows   | Activity | Activity Detail |
|    |                                |          | (Estim) |      | Active(s) | Active |       | (Actual) |   (%)    |   (# samples)   |
=========================================================================================================================================
|  0 | MERGE STATEMENT                |          |         |      |         1 |     +0 |     1 |        1 |          |                 |
|  1 |   MERGE                        | TGT      |         |      |         1 |     +0 |     1 |        1 |          |                 |
|  2 |    PX COORDINATOR              |          |         |      |         1 |     +0 |     5 |        2 |          |                 |
|  3 |     PX SEND QC (RANDOM)        | :TQ10000 |       2 |    3 |         1 |     +0 |     4 |        2 |          |                 |
|  4 |      VIEW                      |          |         |      |         1 |     +0 |     4 |        2 |          |                 |
|  5 |       NESTED LOOPS OUTER       |          |       2 |    3 |         1 |     +0 |     4 |        2 |          |                 |
|  6 |        PX BLOCK ITERATOR       |          |       2 |    2 |         1 |     +0 |     4 |        2 |          |                 |
|  7 |         TABLE ACCESS FULL      | STG      |       2 |    2 |         1 |     +0 |    10 |        2 |          |                 |
|  8 |        PARTITION RANGE ITERATOR|          |       1 |    3 |         1 |     +0 |     2 |        2 |          |                 |
|  9 |         PARTITION HASH ITERATOR|          |       1 |    3 |         1 |     +0 |     2 |        2 |          |                 |
| 10 |          TABLE ACCESS FULL     | TGT      |       1 |    3 |         1 |     +0 |     2 |        2 |          |                 |
=========================================================================================================================================

The parallel degree four defined on the stg table was obeyed (DOP=4). There were only two rows to merge, and processes p000 and p002 did more work than the others. It seems a partition-wise parallel merge is possible even in this case, where the source table partitioning matches the target table subpartitioning.
But is it really partition wise? The plan join strategy is nested loops, so is it actually processing the data partition by partition? This example was constructed with a small number of rows; how about more rows and more work to do? To be continued...
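As a side note to this post: one knob that is sometimes used to ask explicitly for a full partition-wise join is the PQ_DISTRIBUTE hint with NONE NONE, which requests that neither side of the join be redistributed. A minimal sketch of the idea is below; it is not from the original experiment, whether the optimizer honours it for this MERGE (and whether the plan changes) would have to be tested.

-- Sketch only: request no redistribution of either join side,
-- i.e. the distribution a full partition-wise join would use.
merge /*+ monitor pq_distribute(tgt none none) */ into tgt
using (select cust, prd, dt, num from stg) sou
on (tgt.dt=sou.dt and tgt.cust=sou.cust and tgt.prd=sou.prd)
when matched then update set tgt.num = tgt.num + sou.num
when not matched then insert (tgt.cust, tgt.prd, tgt.dt, tgt.num)
                      values (sou.cust, sou.prd, sou.dt, sou.num);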
2012-04-26
Overlapping Locator approach
I have written earlier about handling overlapping time periods. Those approaches try to deal with populating rows without running into mutating-table problems and compound triggers. This post shows a query-based way of validating such data. But first, a list of earlier posts about the issue:
- not overlapping
- MV approach
- exclusion constraints
- Packing rows
- joining ranges
- sum over time
- not overlapping daily
drop table z;

CREATE TABLE Z
( V VARCHAR2(10) NOT NULL
, VALIDFROM date NOT NULL
, VALIDTILL date NOT NULL
, CONSTRAINT FRO2000 CHECK (to_date('20000101','yyyymmdd') < VALIDFROM)
, CONSTRAINT TIL2050 CHECK (VALIDTILL <= to_date('20510101','yyyymmdd'))
, CONSTRAINT FROTIL CHECK (VALIDFROM <= VALIDTILL)
);

INSERT INTO Z VALUES('meet',to_date('20010101','yyyymmdd'),to_date('20110101','yyyymmdd'));
INSERT INTO Z VALUES('meet',to_date('20110101','yyyymmdd'),to_date('20130101','yyyymmdd'));
INSERT INTO Z VALUES('overlap',to_date('20010101','yyyymmdd'),to_date('20110101','yyyymmdd'));
INSERT INTO Z VALUES('overlap',to_date('20050101','yyyymmdd'),to_date('20130101','yyyymmdd'));
INSERT INTO Z VALUES('disjoint',to_date('20010101','yyyymmdd'),to_date('20050101','yyyymmdd'));
INSERT INTO Z VALUES('disjoint',to_date('20110101','yyyymmdd'),to_date('20130101','yyyymmdd'));
COMMIT;

Then a function that constructs an SDO_GEOMETRY object out of a period.
CREATE OR REPLACE FUNCTION TF(FRO DATE, TIL DATE) RETURN SDO_GEOMETRY deterministic as
BEGIN
  RETURN MDSYS.SDO_GEOMETRY(2002, NULL, NULL
                           , SDO_ELEM_INFO_ARRAY(1,2,1)
                           , SDO_ORDINATE_ARRAY(to_number(to_char(FRO,'j')), 0
                                               ,to_number(to_char(TIL,'j')), 0));
END;
/

A query that aggregates the periods of each v value. When the aggregated SDO_GEOMETRY object is a single line (SDO_GTYPE=2002), the periods form one unbroken chain and the requirement is satisfied. Otherwise it is a multiline (SDO_GTYPE=2006).
select v
     , min(validfrom) mi
     , max(validtill) ma
     , SDO_AGGR_CONCAT_LINES(tf(validfrom,validtill)).SDO_GTYPE gt
from z
group by v
;

v          mi         ma         gt
disjoint   1.1.2001   1.1.2013   2006
meet       1.1.2001   1.1.2013   2002
overlap    1.1.2001   1.1.2013   2006

The violations of the rule are multilines. The aggregate function may also be used in the HAVING part of a query.
select v
     , min(validfrom)
     , max(validtill)
     , SDO_AGGR_CONCAT_LINES(tf(validfrom,validtill)).SDO_GTYPE gt
from z
group by v
having SDO_AGGR_CONCAT_LINES(tf(validfrom,validtill)).SDO_GTYPE = 2006
;
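For comparison, the same continuity rule can also be checked without Oracle Spatial, using an analytic function. This is a minimal sketch, not from the original post; it assumes the table Z above and flags a v value whenever a row does not start exactly where the previous row (ordered by validfrom) ended.

select v
from ( select v
            , validfrom
            , lag(validtill) over (partition by v order by validfrom) prev_till
       from z )
where prev_till is not null
  and prev_till <> validfrom   -- a gap or an overlap against the previous period
group by v
;

Against the example data this should flag disjoint and overlap but not meet, matching the SDO_GTYPE=2006 result of the geometry approach.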
2012-04-20
Making a schema empty
A thing for a future Oracle version wish list: schema-level privileges. I mean a way to grant select on all tables in a schema, such that the grant also covers tables created afterwards. SQL Server has had such a possibility since 2008.
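For reference, a minimal sketch of what that looks like on the SQL Server side (the schema and user names are made up for illustration):

-- SQL Server: one grant covers every table in the schema,
-- including tables created after the grant is issued.
GRANT SELECT ON SCHEMA::dbo TO reporting_reader;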
One can find drop-all-schema-objects scripts and discussions. This way you preserve the grants given to your database user, and there is no need to issue DROP USER CASCADE. Sometimes there is only a need to make all the schema tables empty; after dropping tables you lose the grants given to others. Disabling foreign key constraints, truncating the tables and re-enabling the constraints may be a way to do this. If your tables are small, truncating may do more work than just deleting. Deleting also requires the foreign keys to be considered: one way is to disable them, delete and re-enable; another is to delete the rows in the order the foreign keys are defined. A sketch of the truncate variant is shown below, followed by an anonymous block that deletes all rows from all tables in a schema in foreign key dependency order. It starts by deleting rows from tables that are not referenced by other tables and then, level by level down the foreign key tree, deletes the rest. You probably do not want to run this in a schema with huge tables.
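First the disable/truncate/enable variant: a minimal sketch, not from the original post. It assumes you own all the tables, nothing else is using them, and that re-enabling every currently disabled foreign key afterwards is acceptable; error handling is omitted.

begin
  -- disable all enabled foreign key constraints in the schema
  for c in (select table_name, constraint_name
            from user_constraints
            where constraint_type = 'R' and status = 'ENABLED') loop
    execute immediate 'alter table "' || c.table_name ||
                      '" disable constraint "' || c.constraint_name || '"';
  end loop;
  -- truncate every table
  for t in (select table_name from user_tables where iot_name is null) loop
    execute immediate 'truncate table "' || t.table_name || '"';
  end loop;
  -- re-enable all foreign key constraints that are currently disabled
  for c in (select table_name, constraint_name
            from user_constraints
            where constraint_type = 'R' and status = 'DISABLED') loop
    execute immediate 'alter table "' || c.table_name ||
                      '" enable constraint "' || c.constraint_name || '"';
  end loop;
end;
/

The anonymous block below takes the other approach and deletes in dependency order instead.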
declare
  perform_execute boolean;

  procedure putline_or_execute(s varchar2, perform_execute boolean) is
  begin
    if perform_execute = true then
      begin
        execute immediate s;
      exception
        when others then
          dbms_output.put_line('FAILED: '||s);
          raise;
      end;
    else
      dbms_output.put_line(s||';'||chr(10));
    end if;
  end putline_or_execute;
begin
  perform_execute := true;
  for com in (
    with cte(table_name,constraint_name,r_table_name,r_constraint_name,deferred) as (
      select r.table_name, r.constraint_name, p.table_name, p.constraint_name, r.deferred
      from user_constraints r
      inner join user_constraints p on p.constraint_name = r.r_constraint_name
      where r.constraint_type = 'R'
        and r.deferred = 'IMMEDIATE'
        and r.status = 'ENABLED'
    ), hcte(table_name,constraint_name,r_table_name,r_constraint_name,lvl) as (
      select null, null, e.table_name, null, 1
      from user_tables e
      where e.iot_name is null
        and e.table_name not in (select r_table_name from cte)
      union all
      select pa.table_name, pa.constraint_name, pa.r_table_name, pa.r_constraint_name, ch.lvl+1
      from cte pa
      inner join hcte ch on pa.table_name = ch.r_table_name
    ) search breadth first by table_name set dep
    select 'delete from '||r_table_name statem, max(lvl) mx
    from hcte
    group by r_table_name
    order by max(lvl), r_table_name
  ) loop
    putline_or_execute(com.statem, perform_execute);
  end loop;
end;
/
2012-04-04
Misleading ORA-44425
The error message for ORA-44425 is misleading.
ORA-44425: XML DB needs to be installed for creating an XMLType table or column
Cause: An attempt was made to create an XMLType table or column without XML DB installed.

The correct part of the message is that you do not have XML DB installed in your database. What it does not say is that you are trying to store your XMLType column as BINARY XML. You do not actually need XML DB installed to use an XMLType column. SQL Developer generates the binary option into the DDL for an XMLType column by default.
CREATE TABLE TABLE_1
( Column_1 XMLTYPE
)
XMLTYPE COLUMN Column_1 STORE AS BINARY XML
;

You have the option to store as CLOB instead.
CREATE TABLE TABLE_1
( Column_1 XMLTYPE
)
XMLTYPE COLUMN Column_1 STORE AS CLOB XML
;

If you need BINARY stored XML, install the XML DB option into your database as the error message states. Otherwise you have the option to change the storage method to the database default, CLOB. This may be found under Relational Models - Physical Models - Tables - TABLE_1 - Columns - Column_1 - right click - Properties - Store as - CLOB.
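If you are unsure whether XML DB is installed in the first place, a quick check against the component registry should tell you (a small sketch, not from the original post):

-- Lists the XML DB component if it is installed; no rows means it is not.
select comp_id, comp_name, version, status
from   dba_registry
where  comp_id = 'XDB';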
There is also the option of STORE AS OBJECT RELATIONAL. Then you need to deal with ORA-19002: Missing XMLSchema URL. The same properties page has XML Schema URL and XML Schema Elem properties to be filled in. Once they are filled in, the generated DDL throws ORA-31159: XML DB is in an invalid state. Maybe this error should be the same as ORA-44425.
The latest 11.2.0.3 create table XMLType_column_properties documentation instructs not to use CLOB storage as it might be deprecated in a future release.
Maybe it would be wise just to install XML DB whenever an XMLType column is used. There are many kinds of XML out there, and unfortunately quite often no knowledge of the schema used. In that case CLOB storage may be fine: no need to install XML DB and make Oracle patching take longer.
2012-04-02
Keep first
Green is good. In the database console it means the database is doing work on CPU; it is not waiting.
It might be, though, that the end user is still waiting for the query to finish. Such problems are not solved by buying more CPUs.
Maybe faster CPUs would help, but more performance is usually gained by not doing unnecessary work in the first place.
drop table a cascade constraints;

create table a as
select mod(level,5) b, level c, 'foo'||level d
from dual connect by level < 100000;

exec dbms_stats.gather_table_stats(user,'A');

select * from a where rownum <= 10;

1  1  foo1
2  2  foo2
3  3  foo3
4  4  foo4
0  5  foo5
1  6  foo6
2  7  foo7
3  8  foo8
4  9  foo9
0 10  foo10

A simple aggregation from the data:
select b, min(c) c
from a
group by b
;

Plan
SELECT STATEMENT ALL_ROWS  Cost: 92  Bytes: 40  Cardinality: 5
  2 HASH GROUP BY  Cost: 92  Bytes: 40  Cardinality: 5
    1 TABLE ACCESS FULL TABLE RAFU.A  Cost: 88  Bytes: 799.992  Cardinality: 99.999

1 1
2 2
4 4
3 3
0 5

But when we also want the d column included in the result of the aggregation, the SQL needs to be written a bit differently. The desired result:
0 5 foo5
1 1 foo1
2 2 foo2
3 3 foo3
4 4 foo4

Here is an analytic approach to get the desired rows.
select b, c, d
from ( select b, c, d, row_number() over (partition by b order by c) rn
       from a )
where rn = 1
;

Plan
SELECT STATEMENT ALL_ROWS  Cost: 643  Bytes: 12.699.873  Cardinality: 99.999
  3 VIEW RAFU.  Filter Predicates: "RN"=1  Cost: 643  Bytes: 12.699.873  Cardinality: 99.999
    2 WINDOW SORT PUSHED RANK  Filter Predicates: ROW_NUMBER() OVER ( PARTITION BY "B" ORDER BY "C")<=1  Cost: 643  Bytes: 1.699.983  Cardinality: 99.999
      1 TABLE ACCESS FULL TABLE RAFU.A  Cost: 88  Bytes: 1.699.983  Cardinality: 99.999

The analytic way sorts all the rows and restricts them afterwards. How about collecting the needed information while aggregating? An alternative uses the KEEP and FIRST keywords.
select b
     , min(c) c
     , min(d) keep (dense_rank first order by c) dd
from a
group by b
;

Plan
SELECT STATEMENT ALL_ROWS  Cost: 93  Bytes: 85  Cardinality: 5
  2 SORT GROUP BY  Cost: 93  Bytes: 85  Cardinality: 5
    1 TABLE ACCESS FULL TABLE RAFU.A  Cost: 88  Bytes: 1.699.983  Cardinality: 99.999

The plans are taken from Toad. The cardinality information is more accurate in the aggregate version, which may have a huge influence if such code is part of a bigger query. Comparing the analytic approach to the aggregate KEEP FIRST approach gives a 77% improvement in execution time, all of it CPU time, measured with Tom Kyte's runstats.
DECLARE
  CURSOR c_rn IS
    select b, c, d
    from ( select b, c, d, row_number() over (partition by b order by c) rn
           from a )
    where rn = 1;
  --
  CURSOR c_keepfirst IS
    select b, min(c) c, min(d) keep (dense_rank first order by c) dd
    from a
    group by b;
  --
BEGIN
  runstats_pkg.rs_start;
  FOR i IN 1 .. 100 LOOP
    FOR rec IN c_rn LOOP
      NULL;
    END LOOP;
  END LOOP;
  runstats_pkg.rs_middle;
  FOR i IN 1 .. 100 LOOP
    FOR rec IN c_keepfirst LOOP
      NULL;
    END LOOP;
  END LOOP;
  runstats_pkg.rs_stop;
END;
/

STAT...CPU used by this sessio        1,872         426      -1,446
STAT...recursive cpu usage            1,872         424      -1,448
STAT...Elapsed Time                   1,873         424      -1,449
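KEEP works in the other direction too. As a small aside (not from the original post), the same aggregate query can pick the d value of both the smallest and the largest c per group in a single pass; DENSE_RANK LAST mirrors DENSE_RANK FIRST:

select b
     , min(c) c_min
     , min(d) keep (dense_rank first order by c) d_of_min_c
     , max(c) c_max
     , max(d) keep (dense_rank last  order by c) d_of_max_c
from a
group by b
;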
About Me
- Rafu
- I am Timo Raitalaakso. I have been working since 2001 at Solita Oy as a Senior Database Specialist. My main focus is on projects involving the Oracle database. Oracle ACE alumni 2012-2018. In this Rafu on db blog I write about interesting issues that arise from my work with databases, mainly Oracle.