GoldenGate Best Parameters & Tests from Maclean

Sample
drop user ogg_maclean cascade;
create user ogg_maclean identified by oracle;
alter user ogg_maclean default tablespace users;
grant connect,resource to OGG_MACLEAN;

Test schema: OGG_MACLEAN

Test table prefix: maclean_press

conn ogg_maclean/oracle
create table maclean_press1(a int constraint key1 primary key,b varchar2(100),c varchar2(100),d date default sysdate,e date);
create table maclean_press2(a int constraint key2 primary key,b varchar2(100),c varchar2(100),d date default sysdate,e date);
create table maclean_press3(a int constraint key3 primary key,b varchar2(100),c varchar2(100),d date default sysdate,e date);
create table maclean_press4(a int constraint key4 primary key,b varchar2(100),c varchar2(100),d date default sysdate,e date);
create table maclean_press5(a int constraint key5 primary key,b varchar2(100),c varchar2(100),d date default sysdate,e date);
create table maclean_press6(a int constraint key6 primary key,b varchar2(100),c varchar2(100),d date default sysdate,e date);
create table maclean_press7(a int constraint key7 primary key,b varchar2(100),c varchar2(100),d date default sysdate,e date);
create table maclean_press8(a int constraint key8 primary key,b varchar2(100),c varchar2(100),d date default sysdate,e date);
create table maclean_press9(a int constraint key9 primary key,b varchar2(100),c varchar2(100),d date default sysdate,e date);
create table maclean_press10(a int constraint key10 primary key,b varchar2(100),c varchar2(100),d date default sysdate,e date);

create or replace procedure trouble_ogg_sql as 
begin 
    for i in 1..2000000 loop
        insert into maclean_press1(a,b,c,d) values(i,i,i,sysdate);
        insert into maclean_press2(a,b,c,d) values(i,i,i,sysdate);
        insert into maclean_press3(a,b,c,d) values(i,i,i,sysdate);
        insert into maclean_press4(a,b,c,d) values(i,i,i,sysdate);
        insert into maclean_press5(a,b,c,d) values(i,i,i,sysdate);
        insert into maclean_press6(a,b,c,d) values(i,i,i,sysdate);
        insert into maclean_press7(a,b,c,d) values(i,i,i,sysdate);
        insert into maclean_press8(a,b,c,d) values(i,i,i,sysdate);
        insert into maclean_press9(a,b,c,d) values(i,i,i,sysdate);
        insert into maclean_press10(a,b,c,d) values(i,i,i,sysdate);
        if mod(i,2000)=0 then
            commit;
        end if;
    end loop;

    delete from maclean_press1 where a <= 10000;
    commit;
    EXECUTE IMMEDIATE('alter table maclean_press1 move');
    EXECUTE IMMEDIATE('alter index key1 rebuild');
    delete from maclean_press2 where a <= 10000;
    commit;
    EXECUTE IMMEDIATE('alter table maclean_press2 move');
    EXECUTE IMMEDIATE('alter index key2 rebuild');
    delete from maclean_press3 where a <= 10000;
    commit;
    EXECUTE IMMEDIATE('alter table maclean_press3 move');
    EXECUTE IMMEDIATE('alter index key3 rebuild');
    delete from maclean_press4 where a <= 10000;
    commit;
    EXECUTE IMMEDIATE('alter table maclean_press4 move');
    EXECUTE IMMEDIATE('alter index key4 rebuild');
    delete from maclean_press5 where a <= 10000;
    commit;
    EXECUTE IMMEDIATE('alter table maclean_press5 move');
    EXECUTE IMMEDIATE('alter index key5 rebuild');
    delete from maclean_press6 where a <= 10000;
    commit;
    EXECUTE IMMEDIATE('alter table maclean_press6 move');
    EXECUTE IMMEDIATE('alter index key6 rebuild');
    delete from maclean_press7 where a <= 10000;
    commit;
    EXECUTE IMMEDIATE('alter table maclean_press7 move');
    EXECUTE IMMEDIATE('alter index key7 rebuild');
    delete from maclean_press8 where a <= 10000;
    commit;
    EXECUTE IMMEDIATE('alter table maclean_press8 move');
    EXECUTE IMMEDIATE('alter index key8 rebuild');
    delete from maclean_press9 where a <= 10000;
    commit;
    EXECUTE IMMEDIATE('alter table maclean_press9 move');
    EXECUTE IMMEDIATE('alter index key9 rebuild');
    delete from maclean_press10 where a <= 10000;
    commit;
    EXECUTE IMMEDIATE('alter table maclean_press10 move');
    EXECUTE IMMEDIATE('alter index key10 rebuild');

    for i in 1..1 loop
        update maclean_press1 set b=b+1 where a <= 20000;
        commit;
        update maclean_press2 set b=b+1 where a <= 20000;
        commit;
        update maclean_press3 set b=b+1 where a <= 20000;
        commit;
        update maclean_press4 set b=b+1 where a <= 20000;
        commit;
        update maclean_press5 set b=b+1 where a <= 20000;
        commit;
        update maclean_press6 set b=b+1 where a <= 20000;
        commit;
        update maclean_press7 set b=b+1 where a <= 20000;
        commit;
        update maclean_press8 set b=b+1 where a <= 20000;
        commit;
        update maclean_press9 set b=b+1 where a <= 20000;
        commit;
        update maclean_press10 set b=b+1 where a <= 20000;
        commit;
    end loop;

    for i in 1..1 loop
        delete from maclean_press1 where a > 30000 and a <= 40000;
        commit;
        delete from maclean_press2 where a > 30000 and a <= 40000;
        commit;
        delete from maclean_press3 where a > 30000 and a <= 40000;
        commit;
        delete from maclean_press4 where a > 30000 and a <= 40000;
        commit;
        delete from maclean_press5 where a > 30000 and a <= 40000;
        commit;
        delete from maclean_press6 where a > 30000 and a <= 40000;
        commit;
        delete from maclean_press7 where a > 30000 and a <= 40000;
        commit;
        delete from maclean_press8 where a > 30000 and a <= 40000;
        commit;
        delete from maclean_press9 where a > 30000 and a <= 40000;
        commit;
        delete from maclean_press10 where a > 30000 and a <= 40000;
        commit;
    end loop;
end;
/

exec  ogg_maclean.trouble_ogg_sql;

select count(*),sum(a),sum(b) from maclean_press1;
select count(*),sum(a),sum(b) from maclean_press2;
select count(*),sum(a),sum(b) from maclean_press3;
select count(*),sum(a),sum(b) from maclean_press4;
select count(*),sum(a),sum(b) from maclean_press5;
select count(*),sum(a),sum(b) from maclean_press6;
select count(*),sum(a),sum(b) from maclean_press7;
select count(*),sum(a),sum(b) from maclean_press8;
select count(*),sum(a),sum(b) from maclean_press9;
select count(*),sum(a),sum(b) from maclean_press10;

Primary key update test (for Quest)

create table tb1a (id number primary key, name varchar(30));
Run the following SQL to update the primary keys:
Begin
for i in 1..1000 loop
insert into tb1a values (i, 'aaa');
end loop;
commit;
update tb1a set id=id+100;
commit;
end;
/

select min(id),max(id),sum(id) from tb1a;

Partitioned table test

 create table maclean_partition (acct_no number(12),person varchar2(30),week_no number(2)) partition by range (week_no) 
(partition jan values less than(4),partition feb values less than(8),partition others values less than (maxvalue)) enable row movement;

  insert into maclean_partition values(1,'a',2);
 insert into maclean_partition values(2,'b',6);
 insert into maclean_partition values(3,'c',10);
 insert into maclean_partition values(6,'d',6);
 insert into maclean_partition values(8,'e',8);
 insert into maclean_partition values(9,'e',9);
 commit;
 update maclean_partition set person='d' where acct_no=2;
 commit;
 delete maclean_partition where acct_no=1;
 commit;

==========================================================================================================

ASM tnsnames.ora and static listener registration

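A minimal sketch of what this can look like (host name, port, paths and instance name below are only illustrative values for this lab): the Extract logs in to ASM with tranlogoptions asmuser sys@ASM, so the OGG host needs a TNS alias named ASM and, on 11.2 and earlier, a static registration of the ASM instance in the listener.

tnsnames.ora on the OGG host:

ASM =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = vrh1)(PORT = 1521))
    (CONNECT_DATA =
      (SID = +ASM1)
      (UR = A)
    )
  )

listener.ora static registration (grid/ASM home; path is an example):

SID_LIST_LISTENER =
  (SID_LIST =
    (SID_DESC =
      (GLOBAL_DBNAME = ASM)
      (ORACLE_HOME = /g01/grid/11.2.0/grid)
      (SID_NAME = +ASM1)
    )
  )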
===========================================================================================================

Unpack the software and set up ogg.sh

export  GG_HOME=/goldengate
export  LD_LIBRARY_PATH=$ORACLE_HOME/lib   

    GGSCI> create subdirs

Enable archive log mode

Select log_mode from v$database;

shutdown immediate;
startup mount;
alter database archivelog;
alter database open;

Select  
SUPPLEMENTAL_LOG_DATA_MIN
,SUPPLEMENTAL_LOG_DATA_PK
,SUPPLEMENTAL_LOG_DATA_UI
,SUPPLEMENTAL_LOG_DATA_FK
,SUPPLEMENTAL_LOG_DATA_ALL from v$database;

alter database add supplemental log data ;
--alter database add supplemental log data (primary key, unique,foreign key) columns;
alter system switch logfile;

Select  
SUPPLEMENTAL_LOG_DATA_MIN
,SUPPLEMENTAL_LOG_DATA_PK
,SUPPLEMENTAL_LOG_DATA_UI
,SUPPLEMENTAL_LOG_DATA_FK
,SUPPLEMENTAL_LOG_DATA_ALL from v$database;

Alter database force logging;

Create the OGG user

--create tablespace
create tablespace goldengate datafile  size 1024M ;

-- Create the user 
create user goldengate identified by &A default tablespace goldengate;

-- Grant role privileges 
grant  resource, connect, dba to goldengate;

grant create any table to goldengate;
grant create any view to goldengate;
grant create any procedure to goldengate;
grant create any sequence to goldengate;
grant create any index to goldengate;
grant create any trigger to goldengate;

===========================================================================================================
[oracle@vrh1 ~]$ cat ogg.sh

export ORACLE_HOME=/s01/orabase/product/11.2.0/dbhome_1
export ORACLE_SID=VRAC1
export LD_LIBRARY_PATH=/ogg:$ORACLE_HOME/lib:$ORACLE_HOME/lib32
export PATH=/home/oracle/ogg:$PATH

cd /home/oracle/ogg
ggsci 

===========================================================================================================

./GLOBALS

GGSCHEMA goldengate
CheckpointTable goldengate.ckpt
UnlockedTrailFiles

===========================================================================================================

MGR

Port 7809
userid goldengate , password oracle
DYNAMICPORTLIST 9101-9356
CheckMinutes 10
PurgeOldExtracts ./dirdat/*, UseCheckpoints, MinKeepHours 96
PurgeMarkerHistory MinKeepDays 3, MaxKeepDays 7, FrequencyMinutes 120
AutoRestart ER *, WaitMinutes 5, Retries 3
LagInfoMinutes 0
LagReportMinutes 10

===========================================================================================================

Source-side DDL setup

cd ogg

 GRANT EXECUTE ON UTL_FILE TO goldengate;

  ALTER SYSTEM SET RECYCLEBIN =OFF SCOPE =BOTH;

On 11g you can only run ALTER SYSTEM SET RECYCLEBIN=OFF SCOPE=SPFILE; and then restart the instance.

   @marker_setup.sql
    @ddl_setup.sql
    @role_setup.sql

    GRANT GGS_GGSUSER_ROLE TO goldengate;
     @ddl_enable.sql
     @ddl_pin.sql goldengate
     @sequence.sql

dblogin userid  goldengate ,     password oracle
add checkpointtable goldengate.ckpt

ADD TRANDATA XX.XX
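For the test schema used in this note this would be, for example:

ADD TRANDATA ogg_maclean.*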

===========================================================================================================

add extract ext01, tranlog , begin now , threads 2 
add exttrail ./dirdat/ml , extract ext01 , megabytes 200

extract ext01
SETENV (ORACLE_HOME="/s01/orabase/product/11.2.0/dbhome_1" )
SETENV (ORACLE_SID="VRAC1")
SETENV (NLS_LANG = "AMERICAN_AMERICA.AL32UTF8")
--TRANLOGOPTIONS EXCLUDEUSER <Replicat username>
--CacheMgr CacheDirectory ./dirtmp 51980MB, CacheDirectory ./dirtmp/goldengate_tmp
userid goldengate , password oracle
--TranLogOptions ExcludeUser goldengate
--TranLogOptions AltArchivedLogFormat Instance NETDB1 %t_%s_%r.dbf
--TranLogOptions AltArchivedLogFormat Instance NETDB2 %t_%s_%r.dbf
--TranLogOptions AltArchiveLogDest Primary Instance NETDB1 /arch1, AltArchiveLogDest Instance NETDB2 /arch2
tranlogoptions asmuser sys@ASM , ASMPASSWORD oracle
--TRANLOGOPTIONS DBLOGREADER, DBLOGREADERBUFSIZE [x], BUFSIZE [y]
ThreadOptions MaxCommitPropagationDelay 10000 IOLatency 3000
exttrail ./dirdat/ml
DDL Include ALL
--DDL INCLUDE ALL, EXCLUDE OBJNAME "<schema name>.<object name> "
DDLOptions AddTranData, Report
DDLOptions NoCrossRename, Report
Table ogg_maclean.*;

-- Prevent data looping. This is generally used in bi-directional
-- configuration
TRANLOGOPTIONS EXCLUDEUSER <Replicat username>
===========================================================================================================
add extract pump01, EXTTRAILSOURCE ./dirdat/ml 
add rmttrail ./dirdat/ml , extract pump01, megabytes 200

pump 

extract pump01
SETENV (ORACLE_HOME = "<Oracle home path>" )
SETENV (ORACLE_SID="<Oracle sid>")

passthru
rmthost 192.168.1.179 , mgrport 7809 
rmttrail ./dirdat/ml
--DISCARDFILE <discard file path>/<discard file name>.dsc, APPEND Megabytes <n>
table ogg_maclean.*;

===========================================================================================================

add replicat rep01, exttrail ./dirdat/ml

replicat 

replicat rep01
SETENV (ORACLE_HOME = "/s01/oracle/app/oracle/product/11.2.0/dbhome_1" )
SETENV (ORACLE_SID="PRODA")
SETENV (NLS_LANG ="American_America.AL32UTF8")
userid goldengate , password oracle
--HandleCollisions
AssumeTargetDefs
DiscardFile ./dirrpt/rep1.dsc, APPEND Megabytes 800 ,  Purge
DBOptions DeferrefConst
DBOptions SuppressTriggers    
MaxTransOps 10000
GroupTransOps 1000
SQLEXEC "ALTER SESSION SET COMMIT_WRITE = BATCH,NOWAIT"
BatchSQL
--DDL INCLUDE ALL, EXCLUDE OBJNAME "<schema name>.<object name> "
DDLOptions Report
DDLError 24344 Ignore
DDLError 4052 Ignore
DDLError 955 Ignore
DDLError 1408 Ignore
DDLError 911 Ignore
AllowNoOpUpdates
CheckSequenceValue
--IGNORETRUNCATES
--DEFERAPPLYINTERVAL 1 MINUTES
-- Sequence testgg.*, Target testgg.*
MapExclude ogg_maclean.SYS_EXPORT_SCHEMA* ;
map ogg_maclean.* , target ogg_maclean.* ;

--DBOPTIONS SUPPRESSTRIGGERS
Note: the SUPPRESSTRIGGERS option prevents triggers from firing on target objects that are configured for replication with Oracle GoldenGate, which removes the need to manually disable triggers and constraints. To use this option, the Replicat user must be an Oracle Streams administrator, which can be granted by invoking dbms_goldengate_auth.grant_admin_privilege. Available from Oracle 10.2.0.5 and later patches, and from Oracle 11.2.0.2.

================================================================================================

DROP TABLE gg_test;
CREATE TABLE gg_test
(
    a    NUMBER(10),
    b    VARCHAR2(30),
    PRIMARY KEY (a)
);
4.8    100-column table test script
create table table_100cols
(
a1        number(10) not null,
a2        number(10),
a3        number(10),
a4        number(10),
a5        number(10),
a6        number(10),
a7        number(10),
a8        number(10),
a9        number(10),
a10       number(10),
a11       number(10),
a12       number(10),
a13       number(10),
a14       number(10),
a15       number(10),
a16       number(10),
a17       number(10),
a18       number(10),
a19       number(10),
a20       number(10),
a21       number(10),
a22       number(10),
a23       number(10),
a24       number(10),
a25       number(10),
a26       number(10),
a27       number(10),
a28       number(10),
a29       number(10),
a30       number(10),
a31       number(10),
a32       number(10),
a33       number(10),
a34       number(10),
a35       number(10),
a36       number(10),
a37       number(10),
a38       number(10),
a39       number(10),
a40       number(10),
a41       number(10),
a42       number(10),
a43       number(10),
a44       number(10),
a45       number(10),
a46       number(10),
a47       number(10),
a48       number(10),
a49       number(10),
a50       number(10),
a51       number(10),
a52       number(10),
a53       number(10),
a54       number(10),
a55       number(10),
a56       number(10),
a57       number(10),
a58       number(10),
a59       number(10),
a60       number(10),
a61       number(10),
a62       number(10),
a63       number(10),
a64       number(10),
a65       number(10),
a66       number(10),
a67       number(10),
a68       number(10),
a69       number(10),
a70       number(10),
a71       number(10),
a72       number(10),
a73       number(10),
a74       number(10),
a75       number(10),
a76       number(10),
a77       number(10),
a78       number(10),
a79       number(10),
a80       number(10),
a81       number(10),
a82       number(10),
a83       number(10),
a84       number(10),
a85       number(10),
a86       number(10),
a87       number(10),
a88       number(10),
a89       number(10),
a90       number(10),
a91       number(10),
a92       number(10),
a93       number(10),
a94       number(10),
a95       number(10),
a96       number(10),
a97       number(10),
a98       number(10),
a99       number(10),
a100      number(10)
);

alter table table_100cols
  add constraint PK_100cols primary key (a1)
  using index 
  tablespace CS_stat
  pctfree 10
  initrans 2
  maxtrans 255
  storage
  (
    initial 64K
    minextents 1
    maxextents unlimited
  );

  alter table table_100cols
  add constraint PK_100cols primary key (a1)
  using index 
  tablespace users
  pctfree 10
  initrans 2
  maxtrans 255
  storage
  (
    initial 64K
    minextents 1
    maxextents unlimited
  );

  insert into table_100cols values(1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100);
4.9    Performance test script
create sequence seq_gg start with 1 INCREMENT BY 1 MAXVALUE 999999999 cache 500000;

create or replace procedure gg_insert
is
begin
    for i in 1..1000000 loop
        insert into CM_USER (USER_ID,REGION_CODE,ACC_ID,CAUT_ID,USER_STATUS,USER_TYPE)
        values(seq_gg.nextval,571,1111,2222,3,4);
        if mod(i,1000)=0 then
            commit;
        end if;
    end loop;
    commit;
end;
/

create or replace procedure gg_update
is
CURSOR c_gg IS
    SELECT rowid FROM CM_USER;
v_rowid UROWID;
i number(10);
BEGIN
    OPEN c_gg;
    for i in 1..1000000 loop
        FETCH c_gg INTO v_rowid;
        EXIT WHEN c_gg%NOTFOUND;

        UPDATE CM_USER SET ACC_ID=seq_gg.nextval WHERE rowid = v_rowid;
        if mod(i,1000)=0 then
            commit;
        end if;
    end loop;
    commit;
    CLOSE c_gg;
END;
/

create or replace procedure gg_delete
is
begin
    for i in 1..1000 loop
        delete from  CM_USER where rownum<1001;
        commit;
    end loop;
    commit;
end;
/

 

 

attachment:

ogg parameters

Oracle GoldenGate 11.2 introduces full multi-byte support, including multi-byte table and column names and multi-byte literals (such as Chinese) in DDL.

Note: for DML replication across different character sets, the target's NLS_LANG must be set to the actual NLS_LANG value of the source.
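For example, if the source database character set is ZHS16GBK, the Replicat parameter file on the target would carry something like the following (illustrative value, mirroring the ext01/rep01 examples later in this note):

SETENV (NLS_LANG = "SIMPLIFIED CHINESE_CHINA.ZHS16GBK")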

 

Testing shows that this release supports DML and DDL replication of multi-byte objects and data both between identical character sets and between different ones.

DDL replication in the new release no longer enables addtrandata by default; it has to be configured manually with ddloptions. For Oracle 11.2.0.2 and above, schema-level supplemental logging is recommended instead; reference steps:

Switching to schema-level trandata:
SQL> exec dbms_streams_auth.grant_admin_privilege('ogg');
GGSCI (dbsrv64.localdomain) 2> dblogin userid ogg
GGSCI (dbsrv64.localdomain) 3> ADD SCHEMATRANDATA source
2012-02-23 03:26:04 INFO OGG-01788 SCHEMATRANDATA has been added on schema source.

 

Note: using Chinese characters for matching inside map or table clauses does not appear to be supported at the moment and needs further verification. For example, the following is treated entirely as unmapped:
map source.中文*, target target.*;
If the two sides use different character sets, the target's NLS_LANG must be set to the source's actual NLS_LANG value; see the earlier best practice.
defgen can specify the encoding of the generated table-definition file, but make sure the OS environment variables and the database settings are consistent; for a Chinese environment, for example:
NLS_LANG=SIMPLIFIED CHINESE_CHINA.ZHS16GBK
LANG=zh_CN.GBK
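A minimal defgen sketch for this (file names and the UTF-8 charset are only examples; the charset option on DEFSFILE assumes OGG 11.2 or later):

-- dirprm/defgen.prm
defsfile ./dirdef/source.def, charset UTF-8
userid goldengate, password oracle
table ogg_maclean.*;

Run it with: defgen paramfile dirprm/defgen.prm, then copy the generated .def file to the target for use with SOURCEDEFS.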

 

Oracle GoldenGate 11.2 provides two capture modes:
1. the traditional (classic) log-mining mode;
2. the new integrated capture mode, which uses the Oracle XStream interface to capture data.

Schema-level trandata requires Oracle 11.2.0.2 or above; integrated capture supports the wider range of data types only on 11.2.0.3 and above.

 

1) Enable minimal supplemental logging in the database:
alter database add supplemental log data;
2) Add schema-level supplemental logging for the source schema:
SQL> exec dbms_streams_auth.grant_admin_privilege('ogg');
GGSCI (dbsrv64.localdomain) 2> dblogin userid ogg
GGSCI (dbsrv64.localdomain) 3> ADD SCHEMATRANDATA source
2012-02-23 03:26:04 INFO OGG-01788 SCHEMATRANDATA has been added on schema source.
3) Grant the capture user admin privileges:
begin
DBMS_GOLDENGATE_AUTH.GRANT_ADMIN_PRIVILEGE(
grantee => 'source',
privilege_type => 'capture',
grant_select_privileges => true,
do_grants => TRUE
);
end;
/
4) Register the extract in the database; this configures XStream automatically:
DBLOGIN USERID source, PASSWORD oracle
REGISTER EXTRACT ext01 DATABASE
5) Configure the OGG extract and replicat processes:
ADD EXTRACT ext01, INTEGRATED TRANLOG, BEGIN NOW

 

 

Add exttrail ./dirdat/ml, ext ext01
Add rep rep01, exttrail ./dirdat/ia
GGSCI (dbsrv64.localdomain) 5> view param ext01
EXTRACT ext01
SETENV (ORACLE_SID = "oragbk")
--SETENV (NLS_LANG = "AMERICAN_AMERICA.ZHS16GBK")
SETENV (NLS_LANG = "SIMPLIFIED CHINESE_CHINA.ZHS16GBK")
USERID source, PASSWORD oracle
ddl include all
--ddloptions addtrandata,report
ddloptions report
tranlogoptions dblogreader
EXTTRAIL ./dirdat/ml
dynamicresolution
tableexclude source.AQ*;
tableexclude source.OGG*;
tableexclude source.SYS*;
table source.*;

 

GGSCI (dbsrv64.localdomain) 6> view param rep01
replicat rep01

--checkparams
--SETENV (ORACLE_SID = "oragbk")
SETENV (ORACLE_SID = "orautf")
SETENV (NLS_LANG = "SIMPLIFIED CHINESE_CHINA.ZHS16GBK")
--SETENV (NLS_LANG = "AMERICAN_AMERICA.AL32UTF8")
userid target, password oracle
ddl include mapped
ddlerror 24344,ignore
ddlerror 955,ignore
--DDLOPTIONS REPORT
reperror default,discard
--reperror default, abend
discardfile ./dirrpt/repia.dsc,append
assumetargetdefs
map source.*, target target.*;
6) After confirming that the processes started, create a table with a Chinese name on the source and insert/update rows -- replication succeeds.
Note:
Integrated capture creates a number of AQ$/SYS/OGG$ tables under the extract user, so a dedicated user is recommended. In this example the source schema's own tables were captured, so those system tables had to be excluded.

 

 

GGSCI (mlab2.oracle.com) 2>  ADD SCHEMATRANDATA ogg_maclean

2013-11-24 00:42:48  ERROR   OGG-01790  Failed to ADD SCHEMATRANDATA on schema ogg_maclean because of the following SQL error: ORA-26947: Oracle GoldenGate replication is not enabled.
ORA-06512: at "SYS.DBMS_CAPTURE_ADM_INTERNAL", line 1565
ORA-06512: at "SYS.DBMS_CAPTURE_ADM_INTERNAL", line 1074
ORA-06512: at "SYS.DBMS_CAPTURE_ADM", line 722
ORA-06512: at line 1 SQL BEGIN sys.dbms_capture_adm.PREPARE_SCHEMA_INSTANTIATION('ogg_maclean','ALLKEYS_ON'); END;.

GGSCI (mlab2.oracle.com) 3> ADD SCHEMATRANDATA  goldengate;

2013-11-24 00:43:29  ERROR   OGG-01796  Schema: goldengate;, does not exist.

GGSCI (mlab2.oracle.com) 4> ADD SCHEMATRANDATA  goldengate

2013-11-24 00:43:38  ERROR   OGG-01790  Failed to ADD SCHEMATRANDATA on schema goldengate because of the following SQL error: ORA-26947: Oracle GoldenGate replication is not enabled.
ORA-06512: at "SYS.DBMS_CAPTURE_ADM_INTERNAL", line 1565
ORA-06512: at "SYS.DBMS_CAPTURE_ADM_INTERNAL", line 1074
ORA-06512: at "SYS.DBMS_CAPTURE_ADM", line 722
ORA-06512: at line 1 SQL BEGIN sys.dbms_capture_adm.PREPARE_SCHEMA_INSTANTIATION('goldengate','ALLKEYS_ON'); END;.

On 11.2.0.4 you need to set:

SQL> alter system set enable_goldengate_replication=true;

System altered.

 

 

WARNING OGG-02051 Not enough database memory to service Extract in integrated capture.

==> Streams: resolve low memory condition + LogMiner reader: buffer + LogMiner builder: memory => the Streams pool is too small; increase streams_pool_size.
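For example (the 2G target is only illustrative; size it according to the memory the integrated capture actually needs):

SQL> alter system set streams_pool_size=2G scope=both;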

 

 

 

source: HP-IA 192.168.174.201/202    root/roothp sys/sys or oracle
target: Linux  192.168.110.200/116.180 root/roothp
Installation directory: /app/ogg/va

Tables to replicate (schema unipostdb):
T_PUB_JNL_SAV
T_PUB_UNREG
T_CDM_LEDGER

TMP_CARD_LEDGER                TABLE
TMP_LEDGER                     TABLE
TRIG_CDM_LEDGER                TABLE
T_CARD_ACC                     TABLE
T_CARD_ATM                     TABLE
T_CARD_LEDGER                  TABLE
T_CDM_LEDGER                   TABLE
T_PUB_JNL_SAV                  TABLE
T_PUB_UNREG                    TABLE
UNIPOSTDB_TEST                 TABLE
UNREG_BAK                      TABLE

conn unipostdb/unipostdb

select count(*) from unipostdb.T_PUB_JNL_SAV;
select count(*) from unipostdb.T_PUB_UNREG;
select count(*) from unipostdb.T_CDM_LEDGER;

 truncate table unipostdb.T_PUB_JNL_SAV;

truncate table unipostdb.T_PUB_UNREG;

alter extga, begin now
alter extgb, begin now
alter extgc, begin now
alter extgd, begin now

Set pages 999
Set long 4000
Select dbms_metadata.get_ddl('TABLE', 'T_PUB_JNL_SAV', 'UNIPOSTDB') from dual;
Select dbms_metadata.get_ddl('TABLE', 'T_PUB_UNREG', 'UNIPOSTDB') from dual;
Select dbms_metadata.get_ddl('TABLE', 'T_CDM_LEDGER', 'UNIPOSTDB') from dual;

Key column used for @RANGE splitting: CLT_SEQNO

col table_name for a20
col constraint_name for a20
col column_name for a20
select cols.table_name,cols.constraint_name,cols.column_name from user_cons_columns cols, user_constraints cons 
where  cons.table_name = 'T_PUB_UNREG'  and cols.constraint_name=cons.constraint_name;

GLOBALS:
CHECKPOINTTABLE ogg.chkpt
GGSCHEMA ogg

ogg source:
add checkpointtable

add ext extga, tranlog, begin now, threads 2
add rmttrail ./dirdat/ra, ext extga, megabytes 100

add ext extgb, tranlog, begin now, threads 2
add rmttrail ./dirdat/rb, ext extgb, megabytes 100

add ext extgc, tranlog, begin now, threads 2
add rmttrail ./dirdat/rc, ext extgc, megabytes 100

EXTRACT extga
USERID ogg, PASSWORD ogg
rmthost  192.168.110.200, mgrport 7809
rmttrail ./dirdat/ra 
TRANLOGOPTIONS DBLOGREADER
TRANLOGOPTIONS BUFSIZE 10000000, DBLOGREADERBUFSIZE 4194304
REPORTCOUNT EVERY 2 MINUTES, RATE
--dynamicresolution
--GETTRUNCATES
table unipostdb.T_PUB_JNL_SAV;

EXTRACT extgb
USERID ogg, PASSWORD ogg
rmthost  192.168.110.200, mgrport 7809
rmttrail ./dirdat/rb 
TRANLOGOPTIONS DBLOGREADER
TRANLOGOPTIONS BUFSIZE 10000000, DBLOGREADERBUFSIZE 4194304
REPORTCOUNT EVERY 2 MINUTES, RATE
--dynamicresolution
--GETTRUNCATES
table unipostdb.T_PUB_UNREG;

EXTRACT extgc
USERID ogg, PASSWORD ogg
TRANLOGOPTIONS DBLOGREADER
TRANLOGOPTIONS BUFSIZE 10000000, DBLOGREADERBUFSIZE 4194304
rmthost  192.168.110.200, mgrport 7809
rmttrail ./dirdat/rc 
REPORTCOUNT EVERY 2 MINUTES, RATE
--dynamicresolution
--GETTRUNCATES
table unipostdb.T_CDM_LEDGER;

ogg target:
-----------------------------------------------------
add rep repaa, exttrail ./dirdat/ra

replicat repaa
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repaa.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_PUB_JNL_SAV, FILTER ( @RANGE (1, 3) );

add rep repab, exttrail ./dirdat/ra

replicat repab
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repab.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_PUB_JNL_SAV, FILTER ( @RANGE (2, 3) );

add rep repac, exttrail ./dirdat/ra

replicat repac
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repac.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_PUB_JNL_SAV, FILTER ( @RANGE (3, 3) );
-------------------------------------------

add rep repba, exttrail ./dirdat/rb
edit param repba

replicat repba
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repba.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_PUB_UNREG, FILTER ( @RANGE (1, 3) );

add rep repbb, exttrail ./dirdat/rb
edit param repbb

replicat repbb
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repbb.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_PUB_UNREG, FILTER ( @RANGE (2, 3) );

add rep repbc, exttrail ./dirdat/rb

replicat repbc
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repbc.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_PUB_UNREG, FILTER ( @RANGE (3, 3) );

-------------------------------------------------------------------

add rep repca, exttrail ./dirdat/rc
edit param repca

replicat repca
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repca.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_CDM_LEDGER, FILTER ( @RANGE (1, 3) );

add rep repcb, exttrail ./dirdat/rc
edit param repcb

replicat repcb
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repcb.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_CDM_LEDGER, FILTER ( @RANGE (2, 3) );

add rep repcc, exttrail ./dirdat/rc
edit param repcc

replicat repcc
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repcc.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_CDM_LEDGER, FILTER ( @RANGE (3, 3) );

----------------------------------

alter repba, extseqno 0, extrba 0
alter repbb, extseqno 0, extrba 0
alter repbc, extseqno 0, extrba 0

alter repca, extseqno 0, extrba 0
alter repcb, extseqno 0, extrba 0
alter repcc, extseqno 0, extrba 0

-----------------------------------------
linux:
processor       : 63
vendor_id       : GenuineIntel
cpu family      : 6
model           : 46
model name      : Intel(R) Xeon(R) CPU           X7560  @ 2.27GHz
stepping        : 6
cpu MHz         : 2261.119
cache size      : 24576 KB
physical id     : 7
siblings        : 8
core id         : 11
cpu cores       : 4
apicid          : 247
fpu             : yes
fpu_exception   : yes
cpuid level     : 11
wp              : yes
flags           : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm 
syscall nx rdtscp lm constant_tsc ida nonstop_tsc pni monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm
bogomips        : 4522.22
clflush size    : 64
cache_alignment : 64
address sizes   : 44 bits physical, 48 bits virtual
power management: [8]

hp-unix
32c X 2 

------------------------------

PURGEOLDEXTRACTS ./dirdat/*, USECHECKPOINTS

35 + 40 GB of redo logs produced a 24 GB trail queue.

Log Read Checkpoint  Oracle Redo Logs
                     2011-12-30 14:49:21  Thread 1, Seqno 1956, RBA 397949752
Log Read Checkpoint  Oracle Redo Logs
                     2011-12-30 14:49:21  Thread 2, Seqno 1339, RBA 293856

----------------------------------------------------------------------

add ext extgd, tranlog, begin now, threads 2
add rmttrail ./dirdat/rd, ext extgd, megabytes 100

EXTRACT extgd
USERID ogg, PASSWORD ogg
rmthost  192.168.110.200, mgrport 7809
rmttrail ./dirdat/rd
TRANLOGOPTIONS DBLOGREADER
TRANLOGOPTIONS BUFSIZE 10000000, DBLOGREADERBUFSIZE 4194304
REPORTCOUNT EVERY 2 MINUTES, RATE
--dynamicresolution
GETTRUNCATES
table unipostdb.T_PUB_JNL_SAV, FILTER ( @RANGE (2, 2,  CLT_SEQNO) );

-----------

add rep repda, exttrail ./dirdat/rd

replicat repda
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repda.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_PUB_JNL_SAV, FILTER ( @RANGE (1, 3) );

add rep repdb, exttrail ./dirdat/rd

replicat repdb
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repdb.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_PUB_JNL_SAV, FILTER ( @RANGE (2, 3) );

add rep repdc, exttrail ./dirdat/rd

replicat repdc
userid ogg, password ogg
reperror default, abend
discardfile ./dirrpt/repdc.dsc,append, megabytes 10
REPORTCOUNT EVERY 2 MINUTES, RATE
GROUPTRANSOPS 2000
BATCHSQL
assumetargetdefs
--allownoopupdates
--dynamicresolution
map unipostdb.T_PUB_JNL_SAV, target unipostdb.T_PUB_JNL_SAV, FILTER ( @RANGE (3, 3) );

-----------------------------------
session_cached_cursors 50 -> 200
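For example (session_cached_cursors is a static parameter, so it needs spfile scope and an instance restart):

SQL> alter system set session_cached_cursors=200 scope=spfile;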

 

 

 

very_large_table.sql

 

OGG-01738 BOUNDED RECOVERY

2012-10-20 10:28:02  INFO    OGG-01738  BOUNDED RECOVERY: CHECKPOINT: for object pool 1: p17448_extr: start=SeqNo: 343, RBA: 79286800, SCN: 0.3712874 (3712874), Timestamp: 2012-10-19 22:27:45.000000, Thread: 1, end=SeqNo: 343, RBA: 79287296, SCN: 0.3712874 (3712874), Timestamp: 2012-10-19 22:27:45.000000, Thread: 1.

2012-10-20 14:28:05  INFO    OGG-01738  BOUNDED RECOVERY: CHECKPOINT: for object pool 1: p17448_extr: start=SeqNo: 343, RBA: 107000336, SCN: 0.3725744 (3725744), Timestamp: 2012-10-20 02:27:14.000000, Thread: 1, end=SeqNo: 343, RBA: 107000832, SCN: 0.3725744 (3725744), Timestamp: 2012-10-20 02:27:14.000000, Thread: 1.

2012-10-20 18:28:06  INFO    OGG-01738  BOUNDED RECOVERY: CHECKPOINT: for object pool 1: p17448_extr: start=SeqNo: 343, RBA: 128054288, SCN: 0.3739371 (3739371), Timestamp: 2012-10-20 06:28:02.000000, Thread: 1, end=SeqNo: 343, RBA: 128054784, SCN: 0.3739371 (3739371), Timestamp: 2012-10-20 06:28:02.000000, Thread: 1.

2012-10-20 22:28:06  INFO    OGG-01738  BOUNDED RECOVERY: CHECKPOINT: for object pool 1: p17448_extr: start=SeqNo: 343, RBA: 153368080, SCN: 0.3752583 (3752583), Timestamp: 2012-10-20 10:27:46.000000, Thread: 1, end=SeqNo: 343, RBA: 153368576, SCN: 0.3752583 (3752583), Timestamp: 2012-10-20 10:27:46.000000, Thread: 1.

2012-10-21 02:28:08  INFO    OGG-01738  BOUNDED RECOVERY: CHECKPOINT: for object pool 1: p17448_extr: start=SeqNo: 343, RBA: 165712912, SCN: 0.3763760 (3763760), Timestamp: 2012-10-20 14:28:00.000000, Thread: 1, end=SeqNo: 343, RBA: 165713408, SCN: 0.3763760 (3763760), Timestamp: 2012-10-20 14:28:00.000000, Thread: 1.

2012-10-21 06:28:15  INFO    OGG-01738  BOUNDED RECOVERY: CHECKPOINT: for object pool 1: p17448_extr: start=SeqNo: 343, RBA: 179789328, SCN: 0.3774866 (3774866), Timest

...skipping one line
2012-10-21 10:28:16  INFO    OGG-01738  BOUNDED RECOVERY: CHECKPOINT: for object pool 1: p17448_extr: start=SeqNo: 343, RBA: 201859088, SCN: 0.3788193 (3788193), Timestamp: 2012-10-20 22:26:32.000000, Thread: 1, end=SeqNo: 343, RBA: 201859584, SCN: 0.3788193 (3788193), Timestamp: 2012-10-20 22:26:32.000000, Thread: 1.

2012-10-21 14:28:26  INFO    OGG-01738  BOUNDED RECOVERY: CHECKPOINT: for object pool 1: p17448_extr: start=SeqNo: 343, RBA: 246480912, SCN: 0.3803284 (3803284), Timestamp: 2012-10-21 02:27:31.000000, Thread: 1, end=SeqNo: 343, RBA: 246481408, SCN: 0.3803284 (3803284), Timestamp: 2012-10-21 02:27:31.000000, Thread: 1.

2012-10-21 18:28:33  INFO    OGG-01738  BOUNDED RECOVERY: CHECKPOINT: for object pool 1: p17448_extr: start=SeqNo: 343, RBA: 291493392, SCN: 0.3821051 (3821051), Timestamp: 2012-10-21 06:28:22.000000, Thread: 1, end=SeqNo: 343, RBA: 291493888, SCN: 0.3821051 (3821051), Timestamp: 2012-10-21 06:28:22.000000, Thread: 1.

 

 

 

Oracle GoldenGate 11.x introduces the concept of Bounded Recovery (BR): Extract writes long-running transactions (transactions open for longer than the value specified by BRINTERVAL) to a local BR directory. When Extract restarts, it first reads the BR files instead of going back to the archived logs at the recovery checkpoint, which improves restart performance and reduces the dependency on old archive logs.
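For example, the BR directory and checkpoint interval can be tuned with the BR parameter in the Extract parameter file; a minimal sketch (directory name and 4-hour interval are only illustrative values, check the BR parameter reference for the exact option syntax):

BR BRDIR ./BR, BRINTERVAL 4H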

However, when the Bounded Recovery (BR) feature is used in a RAC environment to recover an Extract that abended abnormally, there is a small chance of the Extract hanging or of specific transactions being lost. The bug only appears in RAC, or in a single-instance setup configured with multiple threads.

1. bug 10368242: transaction loss with BR
When a transaction commits, it is flushed to the trail file. But if BR writing has started (after the transaction committed) and Extract then abends abnormally, Extract may not have had a chance to flush that committed transaction to the trail. When Extract restarts it reads from BR, keeps that committed transaction in memory as a persisted committed transaction, and never writes it to the trail, so the committed transaction may be lost.

The problem does not happen when the Extract is stopped normally.

2. bug 12532428 (base bug 10408077): extract hangs when BR is used and new objects are added to the extract
With BR set up, when new objects (tables, sequences, DDL, etc.) are added to the Extract, the restarted Extract picks up more data, which causes the fixed producer-queue limit used by BR to be reached. Because the Extract is still in BR recovery, the consumer thread is stopped and not processing data from the producer queues. This causes a deadlock and the Extract appears hung.

Solutions

1. For the transaction loss caused by bug 10368242, the bug is fixed in 11.1.1.1 and will be backported to 11.1.1.0.

2. For the Extract hang caused by bug 12532428 (base bug 10408077), the bug is fixed in 11.1.1.1 and 11.1.1.0.30; it can also be worked around as follows:

A workaround on earlier 11.1.1.0 versions is to start the Extract with BRRESET whenever a new object is added to it. All archived logs since the recovery checkpoint must be available.

 

GGSCI> start <extract group>, BRRESET

 

When running Oracle GoldenGate 11.1.1.0.6 or higher, Extract "abends" every 4 hours on the hour, which matches the default Bounded Recovery interval.
Extract can be restarted and continues to work, but fails again after 4 hours with the same errors shown below.

ERROR
———
2011-02-06 05:15:38 WARNING OGG-01573 br_validate_bcp: failed in call to: ggcrc64valid.

2011-02-06 05:15:38 WARNING OGG-01573 br_validate_bcp: failed in call to: br_validate_bcp.

2011-02-06 05:15:38 INFO OGG-01639 BOUNDED RECOVERY: ACTIVE: for object pool 1: p7186_Redo Thread 1.

2011-02-06 05:15:38 INFO OGG-01640 BOUNDED RECOVERY: recovery start XID: 0.0.0.

2011-02-06 09:15:46 INFO OGG-01738 BOUNDED RECOVERY: CHECKPOINT: for object pool 1: p7186_Redo Thread 1: start=SeqNo: 21659, RBA: 117520912, SCN: 0.2984644709 (2984644709), Timestamp: 2011-02-06 09:15:44.000000, end=SeqNo: 21659, RBA: 117602816, SCN: 0.2984644709 (2984644709), Timestamp: 2011-02-06 09:15:44.000000.

Cause

Under these conditions, this may be a problem with the Bounded Recovery Checkpoint file. It is likely corrupted.

Solution

The solution is to reset the Bounded Recovery Checkpoint file when restarting the extract like:

GGSCI> start <extract> BRRESET

 

 

BOUNDED RECOVERY
Error message: BOUNDED RECOVERY: reset to initial or altered checkpoint.
Root cause was a database-side problem: the archived log files of node 2 could not be read.

ALTER EXTSEQNO must be performed on each corresponding downstream reader

2009-10-26 23:28:19 WARNING OGG-01519 Waiting at EOF on input trail file ./dirdat/bm000002, which is not marked as complete; but succeeding trail file ./dirdat/bm000003 exists. If ALTER ETROLLOVER has been performed on source extract, ALTER EXTSEQNO must be performed on each corresponding downstream reader.

2009-10-26 23:30:20 INFO OGG-01021 Command received from GGSCI: STATS.

2009-10-26 23:32:25 INFO OGG-01021 Command received from GGSCI: GETLAG.

2009-10-26 23:32:50 INFO OGG-01021 Command received from GGSCI: STATS.

2009-10-26 23:33:20 INFO OGG-01021 Command received from GGSCI: GETLAG.

2009-10-26 23:34:15 INFO OGG-01021 Command received from GGSCI: STATS.

2009-10-26 23:39:02 WARNING OGG-01519 Waiting at EOF on input trail file ./dirdat/bm000002, which is not marked as complete; but succeeding trail file ./dirdat/bm000003 exists. If ALTER ETROLLOVER has been performed on source extract, ALTER EXTSEQNO must be performed on each corresponding downstream reader.

 

 

 

Common OGG initialization approaches

Data initialization

RMAN-based initialization

With this approach the source database needs no downtime during the initialization.

Steps:

Production side ===============================================

1) Install the OGG software on both source and target, and start the manager.

2) Configure the OGG Extract and DataPump on the source.

3) Start the Extract process on the source and manually record the start time, e.g. "2011-05-03 11:20:55"; this timestamp is the cut-off used in the next step to find long-running transactions. It can be queried from the database: select to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') from dual;

4) Long-transaction handling: query V$TRANSACTION (GV$TRANSACTION in RAC) for currently running transactions and identify the long-running ones. Before taking the RMAN backup, make sure these long transactions have completed or have been killed (confirmed with the DBA or the responsible administrators).

Select start_time from gv$transaction where to_date(start_time, 'yyyy-mm-dd hh24:mi:ss') < to_date('2011-05-03 11:20:55', 'yyyy-mm-dd hh24:mi:ss');

Note: the SQL above finds transactions that started before the timestamp recorded in step 3; wait until those transactions finish, then run the RMAN backup.

 

 

5) Log in as the oracle user and run the RMAN backup.

-- Back up the database:

$ rman target /

run {
allocate channel ch00 type disk maxpiecesize 20g;
allocate channel ch01 type disk maxpiecesize 20g;
crosscheck backupset;
delete noprompt expired backupset;
sql 'alter system archive log current';
backup as backupset skip inaccessible tag hot_db_bk_level0
full database
format '/u01/backup/bk_%s_%p_%t';
release channel ch00;
release channel ch01;
}

-- Back up the archivelogs and the controlfile (with three redo log groups, switch as below; with more groups, add the corresponding number of switches):

run {
ALLOCATE CHANNEL ch00 TYPE DISK MAXPIECESIZE 20G;
ALLOCATE CHANNEL ch01 TYPE DISK MAXPIECESIZE 20G;
sql 'alter system switch logfile';
sql 'alter system switch logfile';
sql 'alter system switch logfile';
sql 'alter system archive log current';
BACKUP ARCHIVELOG ALL FORMAT '/u01/backup/ARCH_%U';
BACKUP CURRENT CONTROLFILE FORMAT '/u01/backup/bk_controlfile';
RELEASE CHANNEL ch00;
RELEASE CHANNEL ch01;
}

Run on the source host:

SQL> set linesize 200
SQL> select * from gv$log;

    GROUP#    THREAD#  SEQUENCE#      BYTES    MEMBERS ARC STATUS    FIRST_CHANGE# FIRST_TIM
---------- ---------- ---------- ---------- ---------- --- --------- ------------- ---------
         1          1         23  104857600          1 NO  CURRENT          218512 01-DEC-10
         2          1         22  104857600          1 YES ACTIVE           218422 01-DEC-10
         3          2         17  104857600          1 NO  CURRENT          218503 01-DEC-10
         4          2         16  104857600          1 YES INACTIVE         218412 01-DEC-10 (*)

Take the last SCN of the inactive archived logs: if there are several archived INACTIVE groups, use the largest FIRST_CHANGE#; here that is 218412.

Record this SCN (it is used in step 10 when recovering the target database), then FTP the backup files to the target host.

========================================

hpux2:/u01/oracle$ ftp 192.168.1.27

Connected to 192.168.1.27.

220 hpux_dr FTP server (Revision 4.0 Version wuftpd-2.6.1 Wed Jun 18 07:11:14 GMT 2008) ready.

Name (192.168.1.27:oracle): oracle

331 Password required for oracle.

Password:

230 User oracle logged in.

Remote system type is UNIX.

Using binary mode to transfer files.

ftp> cd /u01/oracle/rmanbk

250 CWD command successful.

ftp> lcd /u01/oracle/rmanbk

Local directory now /u01/oracle/rmanbk

ftp> bin

200 Type set to I.

ftp> prompt

Interactive mode off.

ftp> mput hp*

 

DR side ===============================================

6) Log in to the target host as the oracle user and perform the restore.

- Create the same directory structure on the target host and set the ORACLE_BASE, ORACLE_HOME and ORACLE_SID environment variables correctly.
- Copy the source init{ORACLE_SID}.ora file to the target host:
  cp $ORACLE_HOME/dbs/initorcl.ora  <corresponding directory on the target>
- Create a password file, or copy $ORACLE_HOME/dbs/PWD{ORACLE_SID}.

 

7) Start the database in nomount:

sqlplus '/ as sysdba'
SQL> startup nomount

8) Restore the controlfile with RMAN:

rman target /
RMAN> restore controlfile from '/u01/oracle/rmanbk/hpdb_ctlfile_757366280381.dbf';

9) Mount the database:

sqlplus '/ as sysdba'
SQL> alter database mount;

10) Restore and recover the database with RMAN:

rman target /
RMAN> restore database;

RMAN>
run
{
   set archivelog destination to '/u01/arch';
   SET UNTIL SCN 218412;     -- the SCN recorded on the source in step 5
   RECOVER DATABASE;
}

 

11) Use the following SQL to find the SCN of the target database (this SCN is used later when starting the Replicat, see section 2.5.8):

SQL> SELECT CHECKPOINT_CHANGE#, CHECKPOINT_TIME FROM V$DATAFILE_HEADER;

CHECKPOINT_CHANGE# CHECKPOIN
------------------ ---------
            218412 28-MAY-11
            218412 28-MAY-11
            218412 28-MAY-11
            218412 28-MAY-11
            218412 28-MAY-11

Notes:

a) If, after step 10, archived logs generated after the RMAN backup were additionally applied on the target, the target database's SCN will be higher than the SCN from step 10. In that case the target database's current SCN must be used, to avoid duplicating data.

b) If no additional archived logs were applied after step 10, the SCN obtained in step 11 should equal the SCN from step 10.

12) (Optional) If the source is RAC and the target is a single instance, convert the target to single-instance mode.

Drop the extra redo log groups:

SQL> select THREAD#, STATUS, ENABLED from v$thread;

   THREAD# STATUS ENABLED
---------- ------ --------
         1 OPEN   PUBLIC
         2 CLOSED PRIVATE

SQL> select group# from v$log where THREAD#=2;

    GROUP#
----------
         4
         5
         6

SQL> alter database disable thread 2;
Database altered.

SQL> alter database drop logfile group 4;
alter database drop logfile group 4
ERROR at line 1:
ORA-00350: log 4 of instance racdb2 (thread 2) needs to be archived
ORA-00312: online log 4 thread 2: '/u01/oracle/oradata/ractest/log/redo04.log'

SQL> alter database clear unarchived logfile group 4;
Database altered.

SQL> alter database drop logfile group 4;
Database altered.

SQL> alter database drop logfile group 5;
Database altered.

SQL> alter database drop logfile group 6;
Database altered.

SQL> select THREAD#, STATUS, ENABLED from v$thread;

   THREAD# STATUS ENABLED
---------- ------ --------
         1 OPEN   PUBLIC

 

 

Drop the extra UNDO tablespace:

SQL> show parameter undo;

NAME                 TYPE     VALUE
-------------------- -------- ------------------------------
undo_management      string   AUTO
undo_retention       integer  900
undo_tablespace      string   UNDOTBS1

SQL> select tablespace_name from dba_tablespaces where contents='UNDO';

TABLESPACE_NAME
------------------------------
UNDOTBS1
UNDOTBS2

SQL> drop tablespace UNDOTBS2 including contents and datafiles;
Tablespace dropped.

 

If the source database is Oracle 9i, recreate the TEMP tablespace, because RMAN does not back up TEMP tablespaces:

SQL> create temporary tablespace TEMP tempfile '/u01/temp.dbf' size 500M;
Tablespace created.

SQL> alter database default temporary tablespace TEMP;
Database altered.

13) Open the target database:

alter database open resetlogs;

Note: at this point source and target are identical, so first remove the DDL scripts, delete the Replicat process and add it again, and only then start replicat repea, aftercsn <>.
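A sketch of that sequence in GGSCI (trail name and checkpoint table are hypothetical here; the CSN is the one obtained in step 11):

GGSCI> delete replicat repea
GGSCI> add replicat repea, exttrail ./dirdat/ea, checkpointtable ogg.chkpt
GGSCI> start replicat repea, aftercsn 218412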

Oracle EXPDP/IMPDP initialization

The source database needs no downtime during the data initialization.

1) Install the OGG software on both source and target, and start the manager.

2) Configure the OGG Extract and DataPump on the source.

3) Start the Extract and DataPump processes on the source and manually record the start time, e.g. "2011-05-03 11:20:55"; this timestamp is the cut-off used in the next step to find long-running transactions.

4) Long-transaction handling: query V$TRANSACTION (GV$TRANSACTION in RAC) for currently running transactions and identify the long-running ones. Before running the export, make sure these long transactions have completed or have been killed (confirmed with the DBA or the responsible administrators).

Select start_time from gv$transaction where to_date(start_time, 'yyyy-mm-dd hh24:mi:ss') < to_date('2011-05-03 11:20:55', 'yyyy-mm-dd hh24:mi:ss');

Note: the SQL above finds transactions that started before the timestamp recorded in step 3; wait until those transactions finish before running the export.

5) Create a directory object for the Data Pump export:

-- directory path for storing the dump file
CREATE OR REPLACE DIRECTORY DATA_PUMP AS 'E:\OGG\data';
grant read, write on DIRECTORY DATA_PUMP to ggs;
-- user that executes expdp
grant read, write on DIRECTORY DATA_PUMP to maclean;

6) Get the current SCN of the source database:

SQL> select dbms_flashback.get_system_change_number from dual;

GET_SYSTEM_CHANGE_NUMBER
------------------------
                 3079975

7) Export the data on the source.

Run the export from the source OS (adjust the user name and dmp file name as needed), then FTP the dump file to the datapump directory on the target:

expdp maclean/maclean directory=DATA_PUMP dumpfile=<dp_maclean.dmp> flashback_scn=<scn>

-- The following steps are executed on the target.

8) Create a directory object for the Data Pump import and grant it to ggs and to the importing user:

-- directory path for storing the dump file
CREATE OR REPLACE DIRECTORY DATA_PUMP AS '/goldengate/data';
grant read, write on DIRECTORY DATA_PUMP to ggs;
grant read, write on DIRECTORY DATA_PUMP to maclean;

9) Import the data on the target.

-- Run from the OS; the user name and dmp file name depend on the actual environment:

impdp maclean/maclean DUMPFILE=DATA_PUMP:DP_maclean.DMP

 

 

Script: GoldenGate for Oracle database pre-check script

select * from dba_logstdby_not_unique where owner='&OWNER';

set null "NULL VALUE"
set feedback off
set heading off
set linesize 132 
set pagesize 9999
set echo off
set verify off
set trimspool on

col table_name for a30
col column_name for a30
col data_type for a15
col object_type for a20
col constraint_type_desc for a30
col Owner format a15

spool AllSchemaCheckOracle.out

SELECT '------ System Info: '
FROM dual;
set heading on
select to_char(sysdate, 'MM-DD-YYYY HH24:MI:SS') "DateTime: " from dual
/
select banner from v$version
/

select name, log_mode "LogMode", 
supplemental_log_data_min "SupLog: Min", supplemental_log_data_pk "PK",
supplemental_log_data_ui "UI", force_logging "Forced",
supplemental_log_data_fk "FK", supplemental_log_data_all "All",
to_char(created, 'MM-DD-YYYY HH24:MI:SS') "Created"
from v$database
/

select 
platform_name
from v$database
/
set heading off
SELECT '------ Objects stored in Tablespaces with Compression are not supported in the current release of OGG ' 
FROM dual;
set heading on
select
TABLESPACE_NAME,
DEF_TAB_COMPRESSION
from DBA_TABLESPACES
where 
DEF_TAB_COMPRESSION <> 'DISABLED';

set heading off
SELECT '------ Distinct Object Types and their Count By Schema: '
FROM dual;
set heading on
SELECT owner, object_type, count(*) total
FROM all_objects
WHERE OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
GROUP BY object_type, owner
/

set heading off
SELECT '------ Distinct Column Data Types and their Count in the Schema: ' 
FROM dual;
set heading on
SELECT data_type, count(*) total
FROM all_tab_columns
WHERE OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
GROUP BY data_type
/

set heading off
SELECT '------ Tables that will Fail Add Trandata (Only an issue for Oracle versions below Oracle 10G) in the Database ' 
FROM dual;
set heading on
SELECT distinct(table_name)
FROM dba_tab_columns
WHERE owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND column_id > 32
AND table_name in
(SELECT distinct(table_name)
FROM all_tables
WHERE owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
MINUS
(SELECT obj1.name
FROM SYS.user$ user1,
SYS.user$ user2,
SYS.cdef$ cdef,
SYS.con$ con1,
SYS.con$ con2,
SYS.obj$ obj1,
SYS.obj$ obj2
WHERE user1.name not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND cdef.type# = 2
AND con2.owner# = user2.user#(+)
AND cdef.robj# = obj2.obj#(+)
AND cdef.rcon# = con2.con#(+)
AND obj1.owner# = user1.user#
AND cdef.con# = con1.con#
AND cdef.obj# = obj1.obj#
UNION
SELECT idx.table_name
FROM all_indexes idx
WHERE idx.owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND idx.uniqueness = 'UNIQUE'))
/

set heading off
SELECT '------ Tables With No Primary Key or Unique Index by Schema: ' 
FROM dual;
set heading on
SELECT owner, table_name
FROM all_tables
WHERE owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
MINUS
(SELECT user1.name, obj1.name
FROM SYS.user$ user1,
SYS.user$ user2,
SYS.cdef$ cdef,
SYS.con$ con1,
SYS.con$ con2,
SYS.obj$ obj1,
SYS.obj$ obj2
WHERE user1.name not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND cdef.type# = 2 
AND con2.owner# = user2.user#(+)
AND cdef.robj# = obj2.obj#(+)
AND cdef.rcon# = con2.con#(+)
AND obj1.owner# = user1.user#
AND cdef.con# = con1.con#
AND cdef.obj# = obj1.obj#
UNION
SELECT distinct(owner), idx.table_name
FROM all_indexes idx
WHERE idx.owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND idx.uniqueness = 'UNIQUE')
/

set heading off
SELECT '------ Tables with NOLOGGING setting ' FROM dual;
SELECT '------ This may cause problems with missing data down stream. ' FROM dual;
set heading on
select owner, table_name, ' ', logging from DBA_TABLES
where logging <> 'YES'
and owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
UNION
select owner, table_name, partitioning_type, DEF_LOGGING "LOGGING" from DBA_part_tables
where DEF_LOGGING != 'YES' 
and owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
UNION
select table_owner, table_name, PARTITION_NAME, logging from DBA_TAB_PARTITIONS
where logging <> 'YES' 
and table_owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
UNION
select table_owner, table_name, PARTITION_NAME, logging from DBA_TAB_SUBPARTITIONS
where logging <> 'YES' 
and table_owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
;

set heading off
SELECT '------ Tables with Deferred constraints.Deferred constraints may cause TRANDATA to chose an incorrect Key ' FROM dual;
SELECT '------ Tables with Deferred constraints should be added using KEYCOLS in the trandata statement. ' FROM dual;
set heading on
SELECT c.TABLE_NAME,
c.CONSTRAINT_NAME,
c.CONSTRAINT_TYPE,
c.DEFERRABLE,
c.DEFERRED,
c.VALIDATED,
c.STATUS,
i.INDEX_TYPE,
c.INDEX_NAME,
c.INDEX_OWNER
FROM dba_constraints c,
dba_indexes i
WHERE
i.TABLE_NAME = c.TABLE_NAME
AND i.OWNER = c.OWNER
AND c.DEFERRED = 'DEFERRED'
And i.owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
;

set heading off
SELECT '------ Tables Defined with Rowsize > 2M in all Schemas '
FROM dual;
set heading on
SELECT table_name, sum(data_length) row_length_over_2M
FROM all_tab_columns
WHERE owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
GROUP BY table_name
HAVING sum(data_length) > 2000000
/

set heading off
SELECT '------ Tables With No Primary Key or Unique Index and Column length > 1M '
FROM dual;
set heading on
SELECT owner, table_name
FROM all_tab_columns
WHERE owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
group by owner, table_name
HAVING sum(data_length) > 1000000
MINUS
(SELECT user1.name, obj1.name
FROM SYS.user$ user1,
SYS.user$ user2,
SYS.cdef$ cdef,
SYS.con$ con1,
SYS.con$ con2,
SYS.obj$ obj1,
SYS.obj$ obj2
WHERE user1.name not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND cdef.type# = 2 
AND con2.owner# = user2.user#(+)
AND cdef.robj# = obj2.obj#(+)
AND cdef.rcon# = con2.con#(+)
AND obj1.owner# = user1.user#
AND cdef.con# = con1.con#
AND cdef.obj# = obj1.obj#
UNION
SELECT idx.owner, idx.table_name
FROM all_indexes idx
WHERE idx.owner not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND idx.uniqueness = 'UNIQUE')
/

set heading off
SELECT '------ Tables With CLOB, BLOB, LONG, NCLOB or LONG RAW Columns in ALL Schemas ' 
FROM dual;
set heading on
SELECT OWNER, TABLE_NAME, COLUMN_NAME, DATA_TYPE
FROM all_tab_columns
WHERE OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND data_type in ('CLOB', 'BLOB', 'LONG', 'LONG RAW', 'NCLOB')
/

set heading off
SELECT '------ Tables With Columns of UNSUPPORTED Datatypes in ALL Schemas ' 
FROM dual;
set heading on
SELECT OWNER, TABLE_NAME, COLUMN_NAME, DATA_TYPE
FROM all_tab_columns
WHERE OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND (data_type in ('ORDDICOM', 'BFILE', 'TIMEZONE_REGION', 'BINARY_INTEGER', 'PLS_INTEGER', 'UROWID', 'URITYPE', 'MLSLABEL', 'TIMEZONE_ABBR', 'ANYDATA', 'ANYDATASET', 'ANYTYPE')
or data_type like 'INTERVAL%')
/

set heading off
SELECT '------ Cluster, or Object Tables - ALL UNSUPPORTED - in ALL Schemas '
FROM dual;
set heading on
SELECT OWNER, TABLE_NAME, CLUSTER_NAME, TABLE_TYPE 
FROM all_all_tables
WHERE OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND (cluster_name is NOT NULL or TABLE_TYPE is NOT NULL)
/

set heading off 
Select '------ All tables that have compression enabled (which we do not currently support): '
from dual;
set heading on
select owner, table_name
from DBA_TABLES
where COMPRESSION = 'ENABLED'
/

SELECT TABLE_OWNER, TABLE_NAME, COMPRESSION
FROM ALL_TAB_PARTITIONS
WHERE (COMPRESSION = 'ENABLED')
/
set heading off
SELECT '------ IOT (fully supported for Oracle 10gR2, with or without overflows, using GGS 10.4 and higher) - in All Schemas: ' 
FROM dual;
set heading on
SELECT OWNER, TABLE_NAME, IOT_TYPE, TABLE_TYPE 
FROM all_all_tables
WHERE OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
AND (IOT_TYPE is not null or TABLE_TYPE is NOT NULL)
/
set heading off
SELECT '------ Tables with Domain or Context Indexes' 
FROM dual;
set heading on
SELECT OWNER, TABLE_NAME, index_name, index_type 
FROM dba_indexes 
WHERE OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
and index_type = 'DOMAIN'
/

set heading off
SELECT '------ Types of Constraints on the Tables in ALL Schemas '
FROM dual;
set heading on
SELECT DECODE(constraint_type,'P','PRIMARY KEY','U','UNIQUE', 'C', 'CHECK', 'R', 
'REFERENTIAL') constraint_type_desc, count(*) total
FROM all_constraints
WHERE OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
GROUP BY constraint_type
/
set heading off
SELECT '------ Cascading Deletes on the Tables in ALL Schemas ' 
FROM dual;
set heading on
SELECT owner, table_name, constraint_name
FROM all_constraints
WHERE OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
and constraint_type = 'R' and delete_rule = 'CASCADE'
/

set heading off
SELECT '------ Tables Defined with Triggers in ALL Schema: '
FROM dual;
set heading on
SELECT table_name, COUNT(*) trigger_count
FROM all_triggers
WHERE OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
GROUP BY table_name
/
set heading off
SELECT '------ Performance issues - Reverse Key Indexes Defined in ALL Schema: '
FROM dual;

col TABLE_OWNER format a10
col INDEX_TYPE format a12
SET Heading on

select 
OWNER, 
INDEX_NAME,
INDEX_TYPE, 
TABLE_OWNER,
TABLE_NAME, 
TABLE_TYPE, 
UNIQUENESS,
CLUSTERING_FACTOR,
NUM_ROWS,
LAST_ANALYZED,
BUFFER_POOL
from dba_indexes
where index_type = 'NORMAL/REV'
And OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
/

SET Heading off
SELECT '------ Sequence numbers - Sequences could be an issue for HA configurations '
FROM dual;

COLUMN SEQUENCE_OWNER FORMAT a15
COLUMN SEQUENCE_NAME FORMAT a30
COLUMN INCR FORMAT 999
COLUMN CYCLE FORMAT A5
COLUMN ORDER FORMAT A5
SET Heading on
SELECT SEQUENCE_OWNER,
SEQUENCE_NAME,
MIN_VALUE,
MAX_VALUE,
INCREMENT_BY INCR,
CYCLE_FLAG CYCLE,
ORDER_FLAG "ORDER",
CACHE_SIZE,
LAST_NUMBER
FROM DBA_SEQUENCES
WHERE SEQUENCE_OWNER not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC')
/
set linesize 132

col "Avg Log Size" format 999,999,999
select sum(BLOCKS) * max(BLOCK_SIZE) / count(*) "Avg Log Size" From gV$ARCHIVED_LOG;

Prompt Table: Frequency of Log Switches by hour and day
SELECT SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),1,5) DAY, 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'00',1,0)),'99') "00", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'01',1,0)),'99') "01", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'02',1,0)),'99') "02", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'03',1,0)),'99') "03", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'04',1,0)),'99') "04", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'05',1,0)),'99') "05", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'06',1,0)),'99') "06", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'07',1,0)),'99') "07", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'08',1,0)),'99') "08", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'09',1,0)),'99') "09", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'10',1,0)),'99') "10", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'11',1,0)),'99') "11", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'12',1,0)),'99') "12", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'13',1,0)),'99') "13", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'14',1,0)),'99') "14", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'15',1,0)),'99') "15", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'16',1,0)),'99') "16", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'17',1,0)),'99') "17", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'18',1,0)),'99') "18", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'19',1,0)),'99') "19", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'20',1,0)),'99') "20", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'21',1,0)),'99') "21", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'22',1,0)),'99') "22", 
TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'23',1,0)),'99') "23" 
FROM V$LOG_HISTORY 
GROUP BY SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),1,5) 
order by 1;
set heading off
SELECT '------ Summary of log volume processed by day for last 7 days: '
FROM dual;
set heading on
select to_char(first_time, 'mm/dd') ArchiveDate,
sum(BLOCKS*BLOCK_SIZE/1024/1024) LOGMB
from v$archived_log
where first_time > sysdate - 7
group by to_char(first_time, 'mm/dd')
order by to_char(first_time, 'mm/dd')
/
set heading off
SELECT '------ Summary of log volume processed per hour for last 7 days: ' 
FROM dual;
set heading on
select to_char(first_time, 'MM-DD-YYYY') ArchiveDate, 
to_char(first_time, 'HH24') ArchiveHour,
sum(BLOCKS*BLOCK_SIZE/1024/1024) LogMB
from v$archived_log
where first_time > sysdate - 7
group by to_char(first_time, 'MM-DD-YYYY'), to_char(first_time, 'HH24')
order by to_char(first_time, 'MM-DD-YYYY'), to_char(first_time, 'HH24')
/

set heading off
select '* This output may be found in file: AllSchemsCheckOracle.out' from dual
/

spool off
undefine b0
-- exit

[Repost] GoldenGate Character Set (Internationalization) Support

•In principle, OGG supports replication between identical character sets
–This applies to both heterogeneous and homogeneous databases
–The NLS_LANG environment variable must be set on both source and target according to the database character set
•When the target is Oracle, OGG supports replication from a character-set subset to a superset
–That is, the target character set must contain the source character set
–NLS_LANG on both source and target must be set according to the source database's character set
–OGG relies on the Oracle database to perform the character set conversion; it has no such capability of its own
–Unless the two sides are identical in every respect except the character set, a table definitions file must be generated with defgen (see the sketch after this list)
•Conversion between different Oracle character sets (not officially supported)
–A defgen definitions file is required
–The target likewise sets NLS_LANG according to the source database's character set
•Other notes on multi-byte data
–Multi-byte metadata (table names, column names, user names, etc.) is not supported
–None of the built-in functions, including filtering, support multi-byte data
–If the two sides use different character length semantics, a definitions file must be used
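A minimal defgen sketch for producing the table definitions file mentioned above (all names and paths here are hypothetical placeholders, not taken from this test environment):

-- ./dirprm/defgen.prm on the source side
DEFSFILE ./dirdef/source.def
USERID ogg_user, PASSWORD ogg_pwd
TABLE src_schema.*;

Run defgen from the OGG home on the source, for example ./defgen paramfile ./dirprm/defgen.prm, then copy ./dirdef/source.def to the target's ./dirdef directory and point the Replicat at it with SOURCEDEFS ./dirdef/source.def. Remember to export NLS_LANG on both sides according to the source database character set before starting the OGG processes.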

Configuring GoldenGate Source-Side Manager Parameters

As everyone knows, the first step in configuring the OGG source side is to set up the parameter file for the source MGR (Manager) process, which can be found at ./dirprm/mgr.prm.

 

Edit the Manager parameter file with the following command:

 

ggsci > edit params mgr

 

Using ggsci's edit params command is helpful because it avoids the hassle of editing the wrong file. OGG parameter files can be modified with any text editor on the system, but this one must be named mgr.prm and must reside in ./dirprm, a subdirectory of the OGG installation.

 

There is a long list of parameters that apply to Manager; here we only cover the ones that matter for best practice.

 

If the source side will be used in a bidirectional replication environment, all of these parameters can be set similarly on the target side. If the source will never act as a target, there are even fewer parameters to consider.

 

 

PORT 7809

 

–PORT is the most common MGR parameter; it specifies the port on which TCP communication is accepted and is a required parameter. 7809 is the generally recommended port.

 

DYNAMICPORTLIST 9101-9356
DYNAMICPORTSREASSIGNDELAY 5

 

–The dynamic port parameters are not required on a side that acts only as the source; normally they only need to be configured on the target side.

 

PURGEOLDEXTRACTS ./dirdat/*, usecheckpoints, minkeephours 96

 

Manager can take care of periodically purging trail files; minkeephours 96 means trails older than 96 hours (i.e. 4 days) are purged, subject to usecheckpoints.

 

LAGINFOSECONDS 15
LAGCRITICALMINUTES 2

 

The two parameters above define the lag reporting thresholds: lag is reported informationally once it exceeds 15 seconds and flagged as critical once it exceeds 2 minutes.

 

 

BOOTDELAYMINUTES 3

 

BOOTDELAYMINUTES above is only effective on Windows; it means OGG MGR boots 3 minutes after Windows has started.

 

AUTOSTART ER *

 

AUTOSTART means the corresponding EXTRACT and REPLICAT processes are started when MGR starts.

 

AutoRestart ER *, WaitMinutes 5, Retries 3

 

AUTORESTART means failed OGG processes are restarted automatically; the line above waits 5 minutes between attempts and retries up to 3 times.

 

PurgeMarkerHistory MinKeepDays 3, MaxKeepDays 7, FrequencyMinutes 120

 

The PurgeMarkerHistory line above defines the purge policy for the DDL replication marker data.

 

CHECKMINUTES 10

 

CHECKMINUTES defines how frequently MGR performs its checks and raises events; above it is set to once every 10 minutes.

 

DOWNCRITICAL

 

Use the DOWNCRITICAL parameter above if a manually stopped OGG process should be reported as critical just like an abended one; it is usually not needed.

 

 

Note that Manager parameters only take effect at startup; if the parameter file is changed, Manager must be restarted for the change to take effect.

 

OGG processes obtain the information they need from Manager when they start. If an Extract or Replicat has requested information from Manager, then after a parameter change it is not only Manager that needs a restart: the dependent processes must also be restarted before the change takes effect. The Refresh command cannot be relied upon to reload MGR parameters.
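A minimal GGSCI restart sequence after mgr.prm has been edited (a sketch only; the wildcards assume you are willing to bounce every Extract/Replicat group, and stop mgr will prompt for confirmation):

GGSCI> stop er *
GGSCI> stop mgr
GGSCI> start mgr
GGSCI> start er *
GGSCI> info all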

 

 

A fairly complete MGR PARAMS example:

 

GGSCI (XIANGBLI-CN) 2> view params mgr

 

Port 7809

UserId goldengate, Password goldengate

CheckMinutes 10
PurgeOldExtracts ./dirdat/*, UseCheckpoints, MinKeepHours 96
PurgeMarkerHistory MinKeepDays 3, MaxKeepDays 7, FrequencyMinutes 120
AutoRestart ER *, WaitMinutes 5, Retries 3
LagInfoMinutes 0
LagReportMinutes 10

GGSCI (XIANGBLI-CN) 4> start mgr

 

Manager started.

Slide: Configuring GoldenGate to Replicate DDL Statements

GoldenGate OGG: List of Common Problems and Errors

The following lists some common OGG problems and errors, together with the notes that address them:

Note: 966211.1 How To Resync A Single Table With Minimum Impact To Other Tables’ Replication?
Note: 966227.1 Troubleshooting TCP/IP Errors In Open Systems
Note: 972954.1 What Causes The ‘Bad Column Index(xxxx)’ Error In Replicat?
Note: 987379.1 How to recover from Extract ERROR 180 encountered commit SCN that is not greater than the highest SCN already processed
Note: 1112506.1 GoldenGate Extract Abends with “Unable to lock file” Error For Trail File
Note: 1124403.1 When Replicat is Running but Not Applying Records
Note: 1138409.1 EXTRACT / REPLICAT CHECKPOINT RBA IS LARGER THAN LOCAL TRAILFILE SIZE, AFTER SERVER CRASH
Note: 1163073.1 When using rmthost with timeout option, there are orphaned data collector processes on target system
Note: 1266389.1 Oracle GoldenGate and compressed tables
Note: 1270168.1 OGG-01028 Compression Not Supported – when extract processes regular and cluster tables
Note: 1276538.1 Replicat abending with missing key cols
Note: 1280584.1 Where To Find The List Of Supported And Unsupported Oracle Data Types for GoldenGate
Note: 1288562.1 GG Data Pumps Error With OGG-01031 When Upgrading the Target Side
Note: 957112.1 Encountered SCN That Is Not Greater Than The Highest SCN Already Processed
Note: 1271578.1 How to Handle Tables without Primary Keys or Unique Indexes with Oracle GoldenGate
Note: 957053.1 Recovering From a GG Pump Process Abending With An Incompatible Record Error
Note: 968622.1 Does GoldenGate Support Tables With An UNUSED Column?
Note: 968614.1 Why Does GoldenGate Report The Lag Is Unknown or a Timestamp Mismatch?
Note: 968769.1 GoldenGate Errors That Occur In Teradata Extract Setting
Note: 964709.1 How To Handle Oracle DDLs (add/drop/resize a Column)
Note: 1300076.1 Usage and considerations for allocation of ports and DYNAMICPORTLIST
Note: 1232303.1 mount options to use when configuring GoldenGate to run on NFS mounted file system
Note: 1298548.1 Extract not accepting CACHESIZE settings
Note: 1300197.1 Logger not logging cobol transactions for GGSLIB with non default AUDCFG location
Note: 1273285.1 How To Troubleshoot Oracle Redo Log Reading Extract Slow Performance Issue
Note: 965356.1 GGS Error 150: No Dynamic Ports Available
Note: 1199561.1 Characterset Conversion: Error 160 Bad column length
Note: 1159684.1 Extract abends,mss: missing trailing blocks, unable to queue I/O, I/O beyond file size
Note: 1292874.1 GoldenGate Extract Abends on Startup with Error OGG-01088 Realloc xxx Bytes Failed.
Note: 1271522.1 Oracle GoldenGate (OGG) 11.1.1.0 and Transparent Data Encryption (TDE)
Note: 965270.1 How does the Manager Allocate TCPIP Ports?
Note: 1195995.1 Implementing GoldenGate for DataGuard fail overs
Note: 965373.1 Installing And Configuring GoldenGate In An Oracle RAC Environment
Note: 1276058.1 Oracle GoldenGate Best Practices: Instantiation from an Oracle Source Database
Note: 971264.1 How To Upgrade GGS Objects In A DDL Replication Environment
Note: 969550.1 Using SCN To Do The Initial Load From Oracle To Oracle Database
Note: 1235986.1 Does Oracle Goldengate extract support distributed transactions?
Note: 1112325.1 Deploying OGG to Achieve Operational Reporting for Oracle E-Business Suite
Note: 1120793.1 How to Get More Information than from Just a Trace on LINUX Boxes for GoldenGate
Note: 1272645.1 Maintaining the OGG Marker table

 

How to set LOGSOURCE PATHMAP on GoldenGate

Question:
The source machine OS is Solaris, the mid machine OS is Linux, and the target machine OS is Solaris. I want to extract from the archived log files on the mid machine and pump the trail files to the target. I checked the GoldenGate Reference Guide version 10.4 on TRANLOGOPTIONS; it says ARCHIVEDLOGONLY and LOGSOURCE PATHMAP must be set. In my environment, how should I set LOGSOURCE PATHMAP?

Answer:
There are two primary requirements that must be met when extracting the data remotely.

1. Endian order (How the bytes are stored)
Big endian OS    : AIX, HPUX, MVS, SOLARIS, S390
Little endian OS : LINUX, VMS, WINDOWS

2. Bit (32 or 64 bit).

If your source is SOLARIS (big endian) and the mid machine where the archived logs reside is LINUX (little endian), then you cannot use LOGSOURCE, because it violates the endian-order requirement.

For example, the source and mid machine can both be Solaris, or another matching big-endian pair such as AIX and HP-UX, and so on.

The general syntax for extracting data from transaction logs that reside on a platform other than the one hosting the database uses LOGSOURCE and PATHMAP. If both the source and mid machine are Solaris, you can use syntax along the lines of:

TRANLOGOPTIONS LOGSOURCE SOLARIS, PATHMAP <path recorded in the database> <actual path on the mid machine>

Please refer to the Oracle GoldenGate Reference Guide, page 356, for the worked example.
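A minimal sketch of the Extract parameter file on the mid machine for this scenario (the group name ext1, the connect string, and both paths are hypothetical placeholders; /u01/arch is the archived-log location as recorded by the source database, /ogg/arch is where the same files actually sit on the mid machine):

EXTRACT ext1
USERID ogg_user@srcdb, PASSWORD ogg_pwd
TRANLOGOPTIONS ARCHIVEDLOGONLY, LOGSOURCE SOLARIS, PATHMAP /u01/arch /ogg/arch
EXTTRAIL ./dirdat/aa
TABLE src_schema.*;

A separate data pump Extract (or RMTHOST/RMTTRAIL in this group) would then ship the trail to the target, as described in the question.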
