Thursday 15 October 2015

Archive Log and Redo Generation Rate

Last One Year archive log generation
----------------------------------------------------

select THREAD#,trunc(next_time),count(*)
from V$ARCHIVED_LOG
where trunc(next_time) between trunc(sysdate-366) and trunc(sysdate)
and DEST_ID =1
group by THREAD#,trunc(next_time)
order by 2 ;

Last One Week archive log generation
---------------------------------------------------------

select THREAD#,trunc(next_time),count(*)
from V$ARCHIVED_LOG
where trunc(next_time) between trunc(sysdate-7) and trunc(sysdate)
and DEST_ID =1
group by THREAD#,trunc(next_time)
order by trunc(next_time) desc ;

Per Month archive log generation
----------------------------------------------------

set pages 8000
select count(name), to_char(next_time,'mon')
from v$archived_log
group by to_char(next_time,'mon') order by 1;
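
Note that grouping on the month abbreviation alone folds the same month from different years together; if a per-calendar-month trend is what you want, a small variant of the same query does it:

set pages 8000
select to_char(next_time,'YYYY-MM') "MONTH", count(name)
from v$archived_log
group by to_char(next_time,'YYYY-MM')
order by 1;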



Query to find HOURLY archive log generation
---------------------------------------------------------------

set pagesize 30;
set lines 300;
col day for a17;
col 00 for a5
col 01 for a5
col 02 for a5
col 03 for a5
col 04 for a5
col 05 for a5
col 06 for a5
col 07 for a5
col 08 for a5
col 09 for a5
col 10 for a5
col 11 for a5
col 12 for a5
col 13 for a5
col 14 for a5
col 15 for a5
col 16 for a5
col 17 for a5
col 18 for a5
col 19 for a5
col 20 for a5
col 21 for a5
col 22 for a5
col 23 for a5
select
  to_char(first_time,'YY-MM-DD') day,
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'00',1,0)),'999') "00",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'01',1,0)),'999') "01",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'02',1,0)),'999') "02",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'03',1,0)),'999') "03",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'04',1,0)),'999') "04",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'05',1,0)),'999') "05",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'06',1,0)),'999') "06",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'07',1,0)),'999') "07",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'08',1,0)),'999') "08",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'09',1,0)),'999') "09",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'10',1,0)),'999') "10",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'11',1,0)),'999') "11",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'12',1,0)),'999') "12",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'13',1,0)),'999') "13",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'14',1,0)),'999') "14",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'15',1,0)),'999') "15",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'16',1,0)),'999') "16",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'17',1,0)),'999') "17",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'18',1,0)),'999') "18",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'19',1,0)),'999') "19",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'20',1,0)),'999') "20",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'21',1,0)),'999') "21",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'22',1,0)),'999') "22",
  to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'23',1,0)),'999') "23",
  COUNT(*)
from v$log_history
group by to_char(first_time,'YY-MM-DD')
order by day ;

HOURLY archive log generation (alternative, from V$ARCHIVED_LOG)
----------------
set pages 100
set lines 200
select count(name) COUNT, to_char(next_time,'mm:dd:yy') "DATE", to_char(next_time,'hh24') "HOUR"
from v$archived_log
group by to_char(next_time,'mm:dd:yy'), to_char(next_time,'hh24')
order by min(next_time);

Query to find REDO APPLY RATE ON STANDBY
------------------------------------------------------

set linesize 400
col Values for a65
col Recover_start for a21
select to_char(start_time,'dd.mm.yyyy hh24:mi:ss') "Recover_start",
       to_char(item)||' = '||to_char(sofar)||' '||to_char(units)||' '||to_char(timestamp,'dd.mm.yyyy hh24:mi') "Values"
from v$recovery_progress
where start_time = (select max(start_time) from v$recovery_progress);
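
On a physical standby, the transport and apply lag are also exposed through V$DATAGUARD_STATS; a quick complementary check:

col name for a20
col value for a20
select name, value, time_computed
from v$dataguard_stats
where name in ('transport lag','apply lag');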

Query to find Redo size generated per day
-------------------------------------------

SELECT SUM_ARCH.DAY,
         SUM_ARCH.GENERATED_GB,
         NVL (SUM_ARCH_DEL.DELETED_GB, 0) DELETED_GB,
         SUM_ARCH.GENERATED_GB - NVL (SUM_ARCH_DEL.DELETED_GB, 0) "REMAINING_GB"
    FROM (  SELECT TO_CHAR (COMPLETION_TIME, 'DD/MM/YYYY') DAY,
                   SUM (ROUND ( (blocks * block_size)/(1024*1024*1024), 2))
                      GENERATED_GB
              FROM V$ARCHIVED_LOG
             WHERE ARCHIVED = 'YES'
          GROUP BY TO_CHAR (COMPLETION_TIME, 'DD/MM/YYYY')) SUM_ARCH,
         (  SELECT TO_CHAR (COMPLETION_TIME, 'DD/MM/YYYY') DAY,
                   SUM (ROUND ( (blocks * block_size) / (1024 * 1024 * 1024), 2))
                      DELETED_GB
              FROM V$ARCHIVED_LOG
             WHERE ARCHIVED = 'YES' AND DELETED = 'YES'
          GROUP BY TO_CHAR (COMPLETION_TIME, 'DD/MM/YYYY')) SUM_ARCH_DEL
   WHERE SUM_ARCH.DAY = SUM_ARCH_DEL.DAY(+)
ORDER BY TO_DATE (DAY, 'DD/MM/YYYY');

Query to find Archive log generation on an hourly basis:
----------------------------------------------------------------------------

set pages 100
select trunc(COMPLETION_TIME,'HH') Hour, thread#,
       round(sum(BLOCKS*BLOCK_SIZE)/1048576) MB, count(*) Archives
from v$archived_log
group by trunc(COMPLETION_TIME,'HH'), thread#
order by 1 ;


Query to find users generating the most redo
-----------------------------------------------------------

SELECT * FROM (
  SELECT s.inst_id, s.sid, s.serial#, s.username, s.program, i.block_changes, (i.block_changes*8)/1024/1024 GB_redo
  FROM gv$session s, gv$sess_io i
  WHERE s.sid = i.sid AND s.inst_id = i.inst_id AND i.block_changes > 0 AND s.username IS NOT NULL
  ORDER BY i.block_changes DESC)
WHERE ROWNUM < 20;


col username for a15
col program for a30
col sql_text for a100
SELECT * FROM (
  SELECT
  s.sid,
  s.username,
  s.program,
  ROUND(t.VALUE/(1024*1024)) AS "Redo Size MB",
  sa.sql_text
  FROM V$SESSION s, V$SESSTAT t, V$STATNAME sn, V$SQLAREA sa
  WHERE s.sid = t.sid
  AND t.statistic# = sn.statistic#
  AND sn.name = 'redo size'
  AND sa.sql_id = s.sql_id
  AND ROUND(t.VALUE/(1024*1024)) != 0
  ORDER BY t.VALUE DESC)
WHERE rownum < 20;


col machine for a25
col username for a15
col redo_MB for 999G990 heading "Redo |Size MB"
column sid_serial for a13;
select * from (
  select b.inst_id,
         lpad((b.sid || ',' || lpad(b.serial#,5)),11) sid_serial,
         b.username,
         machine,
         b.osuser,
         b.status,
         a.redo_mb
  from (select n.inst_id, s.sid,
               round(value/1024/1024) redo_mb
          from gv$statname n, gv$sesstat s
          where n.inst_id = s.inst_id
            and n.name = 'redo size'
            and s.statistic# = n.statistic#) a,
       gv$session b
  where b.inst_id = a.inst_id
    and a.sid = b.sid
    and b.username is not null
  order by a.redo_mb desc)
where rownum <= 10;



Undo Blocks accessed
-----------------------
SELECT s.sid, s.serial#, s.username, s.program, s.machine, t.used_ublk, t.used_urec
FROM v$session s, v$transaction t
WHERE s.taddr = t.addr
ORDER BY 6, 7 desc;
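
If sizes are more useful than raw block counts, a small variation converts USED_UBLK to MB using the database block size (this assumes the undo tablespace uses the default db_block_size):

select s.sid, s.serial#, s.username,
       round(t.used_ublk * to_number(p.value)/1024/1024, 2) undo_mb,
       t.used_urec
from v$session s, v$transaction t, v$parameter p
where s.taddr = t.addr
and p.name = 'db_block_size'
order by undo_mb desc;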

Tuesday 13 October 2015

Standby setup using RMAN duplicate command

We are going to set up the primary and standby databases as below.

primary database name : demodb1 at server test1.india.com
standby database name : demodb2 at server test2.india.com

1. Check archive log mode on the primary database. If it is not in archive log mode, enable archive log mode and force logging.

SQL> archive log list
Database log mode              Archive Mode
Automatic archival             Enabled
Archive destination            /data1/demodb1/arch
Oldest online log sequence     29
Next log sequence to archive   31
Current log sequence           31

SQL> alter database force logging;
Database altered.

SQL> select log_mode,force_logging from v$database;
LOG_MODE FOR
------------ ---
ARCHIVELOG YES
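
If LOG_MODE had come back as NOARCHIVELOG, a minimal sequence to enable archiving (it requires a short outage) would be:

SQL> shutdown immediate
SQL> startup mount
SQL> alter database archivelog;
SQL> alter database open;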

2. Make sure there is network connectivity between the primary and standby servers and that each side can resolve and reach the other with tnsping, using tnsnames entries as shown below.

FROM PRIMARY to STANDBY
-------------------------------
[oracle@test1 ~]$ tnsping demodb2
TNS Ping Utility for Linux: Version 11.2.0.1.0 - Production on 12-OCT-2015 12:12:15
Copyright (c) 1997, 2009, Oracle.  All rights reserved.
Used parameter files:
Used TNSNAMES adapter to resolve the alias
Attempting to contact (DESCRIPTION = (ADDRESS_LIST = (ADDRESS = (PROTOCOL = TCP)(HOST=test2.india.com)(PORT=1521))) (CONNECT_DATA = (SID = demodb2)))
OK (0 msec)
[oracle@test1 ~]$

FROM STANDBY TO PRIMARY
--------------------------------
[oracle@test2 ~]$ tnsping demodb1
TNS Ping Utility for Linux: Version 11.2.0.1.0 - Production on 09-OCT-2015 15:17:38
Copyright (c) 1997, 2009, Oracle.  All rights reserved.
Used parameter files:
Used TNSNAMES adapter to resolve the alias
Attempting to contact (DESCRIPTION = (ADDRESS_LIST = (ADDRESS = (PROTOCOL = TCP)(HOST=test1.india.com)(PORT=1521))) (CONNECT_DATA = (SID = demodb1)))
OK (0 msec)
[oracle@test2 ~]$
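
For reference, the tnsnames.ora entry behind the first test would look roughly like this on the primary server (the demodb1 entry on the standby mirrors it with HOST=test1.india.com):

demodb2 =
  (DESCRIPTION =
    (ADDRESS_LIST =
      (ADDRESS = (PROTOCOL = TCP)(HOST = test2.india.com)(PORT = 1521))
    )
    (CONNECT_DATA = (SID = demodb2))
  )

Also note that, because the auxiliary instance will only be in NOMOUNT during the duplicate, a static SID entry for demodb2 in the standby listener.ora (or the (UR=A) clause in the connect descriptor) is usually needed so the SYS connection is not blocked.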

3. Now we need to copy the password file from the primary $ORACLE_HOME/dbs directory to the standby server's dbs directory, renaming it to match the standby SID. In our case that is demodb2.

[oracle@test1 dbs]$ ls -lrth orapwdemodb1
-rw-r----- 1 oracle oinstall 1.5K Aug 14 12:35 orapwdemodb1

[oracle@test1 dbs]$ scp orapwdemodb1 test2:/u01/app/oracle/product/11.2.0/dbhome_1/dbs/orapwdemodb2
oracle@test2's password:
orapwdemodb1                                  100% 1536     1.5KB/s   00:00
[oracle@test1 dbs]$

On standby server
--------------------------
[oracle@test2 dbs]$ ls -lrth orapwdemodb2
-rw-r----- 1 oracle oinstall 1.5K Oct  9 15:23 orapwdemodb2

4. We will set up the Data Guard related parameters on our primary database.

alter system set log_archive_dest_2='SERVICE=demodb2  ASYNC valid_for=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=demodb2';

alter system set log_archive_dest_state_2='enable';

alter system set FAL_SERVER=demodb2;

alter system set FAL_CLIENT=demodb1;

alter system set DB_FILE_NAME_CONVERT='/data2/demodb2/','/data2/demodb1/' scope=spfile;

alter system set LOG_FILE_NAME_CONVERT='/data2/demodb2/','/data2/demodb1/' scope=spfile;

alter system set LOG_ARCHIVE_CONFIG='DG_CONFIG=(demodb1,demodb2)';
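
To confirm these settings are in place before moving on, a quick check with SHOW PARAMETER is enough (it matches on partial names):

SQL> show parameter log_archive_dest_2
SQL> show parameter log_archive_config
SQL> show parameter fal
SQL> show parameter file_name_convert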

5. Let's start the standby instance in NOMOUNT state after setting the Oracle SID and other parameters. Create a pfile from the spfile on the primary and ship it to the standby server if you are running the primary database with an spfile.

[oracle@test2 ~]$ mkdir /data2/demodb2
[oracle@test2 ~]$ mkdir /data2/demodb2/arch

[oracle@test2 ~]$ ls -lrth pfile.ora
-rw-r--r-- 1 oracle oinstall 908 Oct  9 15:47 pfile.ora

[oracle@test2 ~]$ cat pfile.ora
*.db_name='demodb1'
db_unique_name='demodb2'
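
The actual startup is not shown above; a minimal sketch of bringing the auxiliary instance up in NOMOUNT (this assumes the pfile shown is saved as /home/oracle/pfile.ora; adjust the path for your environment):

[oracle@test2 ~]$ export ORACLE_SID=demodb2
[oracle@test2 ~]$ sqlplus / as sysdba
SQL> startup nomount pfile='/home/oracle/pfile.ora';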

6. Add standby logfiles on the primary database, equal in number and size to the online logfiles (Oracle's general recommendation is one more standby log group than the number of online log groups).

SQL> select GROUP#,THREAD#,MEMBERS,BYTES FROM V$LOG;

    GROUP#    THREAD#    MEMBERS      BYTES
---------- ---------- ---------- ----------
         1          1          1   10485760
         2          1          1   10485760
         3          1          1   10485760

SQL> alter database add standby logfile '/data2/demodb1/stdbylog1.log'  size 10485760;

Database altered.

SQL> alter database add standby logfile '/data2/demodb1/stdbylog2.log'  size 10485760;

Database altered.

SQL> alter database add standby logfile '/data2/demodb1/stdbylog3.log'  size 10485760;

Database altered.

SQL> SELECT GROUP#, BYTES FROM V$STANDBY_LOG;

    GROUP#      BYTES
---------- ----------
         4   10485760
         5   10485760
         6   10485760

7. We will create a script to build the standby using the RMAN DUPLICATE command, setting the standby-related parameters in the same script.

[oracle@test1 ~]$ cat duplicate.sh
run {
allocate channel prmy1 type disk;
allocate channel prmy2 type disk;
allocate channel prmy3 type disk;
allocate channel prmy4 type disk;
allocate auxiliary channel stby type disk;
duplicate target database for standby from active database
spfile
 parameter_value_convert 'demodb1','demodb2'
 set db_unique_name='demodb2'
 set db_file_name_convert='/data2/demodb1/','/data2/demodb2/'
 set log_file_name_convert='/data2/demodb1/','/data2/demodb2/'
 set control_files='/data2/demodb2/control01.ctl','/data2/demodb2/control02.ctl'
 set log_archive_max_processes='10'
 set fal_client='demodb2'
 set fal_server='demodb1'
 set standby_file_management='AUTO'
 set log_archive_config='dg_config=(demodb1,demodb2)'
 set log_archive_dest_2='service=demodb1 ASYNC valid_for=(ONLINE_LOGFILE,PRIMARY_ROLE) db_unique_name=demodb1'
 set log_archive_dest_1='location=/data2/demodb2/arch'
 set audit_file_dest='/data2/demodb2/admin'
;
}
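
As an aside, the same run block can also be executed non-interactively by passing it to RMAN as a command file (a sketch; credentials and file name are the same ones used in the interactive run below):

[oracle@test1 ~]$ rman target sys/oracle@demodb1 auxiliary sys/oracle@demodb2 cmdfile=duplicate.sh log=duplicate.log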

We will run this script at the RMAN prompt.

[oracle@test1 ~]$ rman target sys/oracle@demodb1 auxiliary sys/oracle@demodb2

Recovery Manager: Release 11.2.0.1.0 - Production on Wed Oct 14 00:02:54 2015

Copyright (c) 1982, 2009, Oracle and/or its affiliates.  All rights reserved.

connected to target database: DEMODB1 (DBID=3591110281)
connected to auxiliary database: DEMODB1 (DBID=3591110281)

RMAN>@duplicate.sh
.
.
.


Starting Duplicate Db at 12-OCT-15

contents of Memory Script:
{
   backup as copy reuse
   targetfile  '/u01/app/oracle/product/11.2.0/dbhome_1/dbs/orapwdemodb1' auxiliary format
 '/u01/app/oracle/product/11.2.0/dbhome_1/dbs/orapwdemodb2'   targetfile
 '/u01/app/oracle/product/11.2.0/dbhome_1/dbs/spfiledemodb1.ora' auxiliary format
 '/u01/app/oracle/product/11.2.0/dbhome_1/dbs/spfiledemodb2.ora'   ;
   sql clone "alter system set spfile= ''/u01/app/oracle/product/11.2.0/dbhome_1/dbs/spfiledemodb2.ora''";
}
executing Memory Script

Starting backup at 12-OCT-15
Finished backup at 12-OCT-15

sql statement: alter system set spfile= ''/u01/app/oracle/product/11.2.0/dbhome_1/dbs/spfiledemodb2.ora''

contents of Memory Script:
{
   sql clone "alter system set  dispatchers =
 ''(PROTOCOL=TCP) (SERVICE=demodb2XDB)'' comment=
 '''' scope=spfile";
   sql clone "alter system set  db_unique_name =
 ''demodb2'' comment=
 '''' scope=spfile";
   sql clone "alter system set  db_file_name_convert =
 ''/data2/demodb1/'', ''/data2/demodb2/'' comment=
 '''' scope=spfile";
   sql clone "alter system set  log_file_name_convert =
 ''/data2/demodb1/'', ''/data2/demodb2/'' comment=
 '''' scope=spfile";
   sql clone "alter system set  control_files =
 ''/data2/demodb2/control01.ctl'', ''/data2/demodb2/control02.ctl'' comment=
 '''' scope=spfile";
   sql clone "alter system set  log_archive_max_processes =
 10 comment=
 '''' scope=spfile";
   sql clone "alter system set  fal_client =
 ''demodb2'' comment=
 '''' scope=spfile";
   sql clone "alter system set  fal_server =
 ''demodb1'' comment=
 '''' scope=spfile";
   sql clone "alter system set  standby_file_management =
 ''AUTO'' comment=
 '''' scope=spfile";
   sql clone "alter system set  log_archive_config =
 ''dg_config=(demodb1,demodb2)'' comment=
 '''' scope=spfile";
   sql clone "alter system set  log_archive_dest_2 =
 ''service=demodb1 ASYNC valid_for=(ONLINE_LOGFILE,PRIMARY_ROLE) db_unique_name=demodb1'' comment=
 '''' scope=spfile";
   sql clone "alter system set  log_archive_dest_1 =
 ''location=/data2/demodb2/arch'' comment=
 '''' scope=spfile";
   sql clone "alter system set  audit_file_dest =
 ''/data2/demodb2/admin'' comment=
 '''' scope=spfile";
   shutdown clone immediate;
   startup clone nomount;
}
executing Memory Script

sql statement: alter system set  dispatchers =  ''(PROTOCOL=TCP) (SERVICE=demodb2XDB)'' comment= '''' scope=spfile

sql statement: alter system set  db_unique_name =  ''demodb2'' comment= '''' scope=spfile

sql statement: alter system set  db_file_name_convert =  ''/data2/demodb1/'', ''/data2/demodb2/'' comment= '''' scope=spfile

sql statement: alter system set  log_file_name_convert =  ''/data2/demodb1/'', ''/data2/demodb2/'' comment= '''' scope=spfile

sql statement: alter system set  control_files =  ''/data2/demodb2/control01.ctl'', ''/data2/demodb2/control02.ctl'' comment= '''' scope=spfile

sql statement: alter system set  log_archive_max_processes =  10 comment= '''' scope=spfile

sql statement: alter system set  fal_client =  ''demodb2'' comment= '''' scope=spfile

sql statement: alter system set  fal_server =  ''demodb1'' comment= '''' scope=spfile

sql statement: alter system set  standby_file_management =  ''AUTO'' comment= '''' scope=spfile

sql statement: alter system set  log_archive_config =  ''dg_config=(demodb1,demodb2)'' comment= '''' scope=spfile

sql statement: alter system set  log_archive_dest_2 =  ''service=demodb1 ASYNC valid_for=(ONLINE_LOGFILE,PRIMARY_ROLE) db_unique_name=demodb1'' comment= '''' scope=spfile

sql statement: alter system set  log_archive_dest_1 =  ''location=/data2/demodb2/arch'' comment= '''' scope=spfile

sql statement: alter system set  audit_file_dest =  ''/data2/demodb2/admin'' comment= '''' scope=spfile

Oracle instance shut down

connected to auxiliary database (not started)
Oracle instance started

Total System Global Area     263639040 bytes

Fixed Size                     1335892 bytes
Variable Size                188747180 bytes
Database Buffers              67108864 bytes
Redo Buffers                   6447104 bytes
allocated channel: stby
channel stby: SID=63 device type=DISK

contents of Memory Script:
{
   backup as copy current controlfile for standby auxiliary format  '/data2/demodb2/control01.ctl';
   restore clone controlfile to  '/data2/demodb2/control02.ctl' from
 '/data2/demodb2/control01.ctl';
}
executing Memory Script

Starting backup at 12-OCT-15
channel prmy1: starting datafile copy
copying standby control file
output file name=/u01/app/oracle/product/11.2.0/dbhome_1/dbs/snapcf_demodb1.f tag=TAG20151012T141257 RECID=2 STAMP=892908778
channel prmy1: datafile copy complete, elapsed time: 00:00:07
Finished backup at 12-OCT-15

Starting restore at 12-OCT-15

channel stby: copied control file copy
Finished restore at 12-OCT-15

contents of Memory Script:
{
   sql clone 'alter database mount standby database';
}
executing Memory Script

sql statement: alter database mount standby database

contents of Memory Script:
{
   set newname for tempfile  1 to
 "/data2/demodb2/temp01.dbf";
   switch clone tempfile all;
   set newname for datafile  1 to
 "/data2/demodb2/system01.dbf";
   set newname for datafile  2 to
 "/data2/demodb2/sysaux01.dbf";
   set newname for datafile  3 to
 "/data2/demodb2/undotbs01.dbf";
   set newname for datafile  4 to
 "/data2/demodb2/users01.dbf";
   set newname for datafile  5 to
 "/data2/demodb2/example01.dbf";
   set newname for datafile  6 to
 "/data2/demodb2/ggs_data.dbf";
   backup as copy reuse
   datafile  1 auxiliary format
 "/data2/demodb2/system01.dbf"   datafile
 2 auxiliary format
 "/data2/demodb2/sysaux01.dbf"   datafile
 3 auxiliary format
 "/data2/demodb2/undotbs01.dbf"   datafile
 4 auxiliary format
 "/data2/demodb2/users01.dbf"   datafile
 5 auxiliary format
 "/data2/demodb2/example01.dbf"   datafile
 6 auxiliary format
 "/data2/demodb2/ggs_data.dbf"   ;
   sql 'alter system archive log current';
}
executing Memory Script

executing command: SET NEWNAME

renamed tempfile 1 to /data2/demodb2/temp01.dbf in control file

executing command: SET NEWNAME

executing command: SET NEWNAME

executing command: SET NEWNAME

executing command: SET NEWNAME

executing command: SET NEWNAME

executing command: SET NEWNAME

Starting backup at 12-OCT-15
channel prmy1: starting datafile copy
input datafile file number=00001 name=/data2/demodb1/system01.dbf
channel prmy2: starting datafile copy
input datafile file number=00002 name=/data2/demodb1/sysaux01.dbf
channel prmy3: starting datafile copy
input datafile file number=00006 name=/data2/demodb1/ggs_data.dbf
channel prmy4: starting datafile copy
input datafile file number=00005 name=/data2/demodb1/example01.dbf
output file name=/data2/demodb2/example01.dbf tag=TAG20151012T141316
channel prmy4: datafile copy complete, elapsed time: 00:01:35
channel prmy4: starting datafile copy
input datafile file number=00003 name=/data2/demodb1/undotbs01.dbf
output file name=/data2/demodb2/ggs_data.dbf tag=TAG20151012T141316
channel prmy3: datafile copy complete, elapsed time: 00:02:31
channel prmy3: starting datafile copy
input datafile file number=00004 name=/data2/demodb1/users01.dbf
output file name=/data2/demodb2/undotbs01.dbf tag=TAG20151012T141316
channel prmy4: datafile copy complete, elapsed time: 00:00:55
output file name=/data2/demodb2/users01.dbf tag=TAG20151012T141316
channel prmy3: datafile copy complete, elapsed time: 00:00:25
output file name=/data2/demodb2/sysaux01.dbf tag=TAG20151012T141316
channel prmy2: datafile copy complete, elapsed time: 00:08:22
output file name=/data2/demodb2/system01.dbf tag=TAG20151012T141316
channel prmy1: datafile copy complete, elapsed time: 00:09:22
Finished backup at 12-OCT-15

sql statement: alter system archive log current

contents of Memory Script:
{
   switch clone datafile all;
}
executing Memory Script

datafile 1 switched to datafile copy
input datafile copy RECID=2 STAMP=892661300 file name=/data2/demodb2/system01.dbf
datafile 2 switched to datafile copy
input datafile copy RECID=3 STAMP=892661300 file name=/data2/demodb2/sysaux01.dbf
datafile 3 switched to datafile copy
input datafile copy RECID=4 STAMP=892661300 file name=/data2/demodb2/undotbs01.dbf
datafile 4 switched to datafile copy
input datafile copy RECID=5 STAMP=892661300 file name=/data2/demodb2/users01.dbf
datafile 5 switched to datafile copy
input datafile copy RECID=6 STAMP=892661300 file name=/data2/demodb2/example01.dbf
datafile 6 switched to datafile copy
input datafile copy RECID=7 STAMP=892661300 file name=/data2/demodb2/ggs_data.dbf
Finished Duplicate Db at 12-OCT-15
released channel: prmy1
released channel: prmy2
released channel: prmy3
released channel: prmy4
released channel: stby

RMAN>
RMAN> **end-of-file**

RMAN>

Start managed recovery on the standby database; once it has caught up, cancel recovery, open the standby database, and then restart managed recovery.

SQL>  alter database recover managed standby database disconnect;

SQL>  alter database recover managed standby database cancel;

SQL> alter database open;

SQL>  alter database recover managed standby database disconnect;
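
At this point the standby should be open read-only with managed recovery running again; a quick sanity check:

SQL> select database_role, open_mode from v$database;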

You can keep your standby database in real-time apply mode, where it does not wait for redo logs to be archived; instead, it applies redo as it is written to the current standby logfile on the standby database.

SQL>  alter database recover managed standby database using current logfile disconnect;

Database altered.

You can verify the same from the alert log, as below.

Sun Oct 11 03:38:30 2015
alter database recover managed standby database using current logfile disconnect
Attempt to start background Managed Standby Recovery process (demodb2)
Sun Oct 11 03:38:30 2015
MRP0 started with pid=34, OS id=4155
MRP0: Background Managed Standby Recovery process started (demodb2)
 started logmerger process
Sun Oct 11 03:38:35 2015
Managed Standby Recovery starting Real Time Apply
Parallel Media Recovery started with 4 slaves
Waiting for all non-current ORLs to be archived...
All non-current ORLs have been archived.
Media Recovery Waiting for thread 1 sequence 40 (in transit)
Recovery of Online Redo Log: Thread 1 Group 5 Seq 40 Reading mem 0
  Mem# 0: /data2/demodb2/stdbylog2.log
Completed:  alter database recover managed standby database using current logfile disconnect
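
Besides the alert log, redo apply can also be checked from the standby itself with a couple of standard views, for example:

select process, status, thread#, sequence#, block#, blocks
from v$managed_standby
where process like 'MRP%' or process = 'RFS';

select max(sequence#) last_applied_seq
from v$archived_log
where applied = 'YES';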