
DataUpload


ALTER TABLE customers STORAGE (CELL_FLASH_CACHE KEEP); -- valid values are DEFAULT, KEEP and NONE
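The setting can be read back from the data dictionary to confirm it took effect; a minimal check, assuming the CELL_FLASH_CACHE column is exposed on your Exadata release:

SELECT table_name, cell_flash_cache FROM user_tables WHERE table_name = 'CUSTOMERS';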


alter session set"cell_offload_processing"FALSE;

select m.inst_id, s.name, m.value cell_initial_bytes from gv$mystat m, gv$statname s where s.statistic# = m.statistic# and s.inst_id = m.inst_id and s.name like '%cell%';
col name format a50
SELECT sn.INST_ID, sn.name, ss.value FROM gv$sesstat ss, gv$statname sn,gv$session s WHERE ss.statistic# = sn.statistic# AND s.sid = ss.sid AND s.audsid = SYS_CONTEXT('USERENV','SESSIONID') AND sn.name LIKE '%cell%';
 
create table tt_new_compress compress for query high as select * from dba_tables;
alter table tt compress for query high;
create table tt ( a number ) compress for query high;
create table tt ( a number ) compress for query low;
create table tt ( a number ) compress for archive high;
create table tt ( a number ) compress for archive low;
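To confirm which compression level a table actually ended up with, the dictionary can be queried; a minimal check using the COMPRESSION and COMPRESS_FOR columns:

SELECT table_name, compression, compress_for FROM user_tables WHERE table_name = 'TT';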






analyze table emp partition(p1) compute statistics;
select table_name, partition_name, num_rows from user_tab_partitions;

EXEC DBMS_STATS.GATHER_TABLE_STATS ('SCHEMA_NAME', 'TABLE_NAME', 'PARTITION_NAME', ESTIMATE_PERCENT => DBMS_STATS.AUTO_SAMPLE_SIZE, METHOD_OPT=> 'FOR ALL COLUMNS SIZE AUTO');
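For large partitioned tables, incremental statistics can cut gather time because only changed partitions are re-scanned; a hedged sketch (schema and table names are placeholders):

EXEC DBMS_STATS.SET_TABLE_PREFS('SCHEMA_NAME', 'TABLE_NAME', 'INCREMENTAL', 'TRUE');
EXEC DBMS_STATS.GATHER_TABLE_STATS('SCHEMA_NAME', 'TABLE_NAME'); -- only stale partitions are re-gathered; global stats come from synopses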

1. Create the OS-level directories and grant full (777) permissions on them.
2. Create Oracle directory objects for the data, log, rejected-record and pre-processing executable locations. -- CREATE [OR REPLACE] DIRECTORY directory_name AS 'path_name';
3. Grant read, write and execute permissions on these directories to the appropriate users. -- GRANT READ, WRITE ON DIRECTORY tmp TO scott;
4. Check the OS-level file structure / data types against the data types defined at the table level, to avoid implicit or explicit data conversion.
5. Consider compressing the source files if they sit on NTFS or a remote server, to speed up the transfer (the PREPROCESSOR clause in the example below uses zcat to read compressed files).
6. Create the external table - the syntax is:
CREATE TABLE countries_ext (country_code VARCHAR2(5), country_name VARCHAR2(50), country_language VARCHAR2(50))
ORGANIZATION EXTERNAL (
TYPE ORACLE_LOADER
DEFAULT DIRECTORY ext_tab_data
ACCESS PARAMETERS (
RECORDS DELIMITED BY NEWLINE
PREPROCESSOR exec_dir:'zcat'
FIELDS TERMINATED BY ','
MISSING FIELD VALUES ARE NULL
(country_code CHAR(5), country_name CHAR(50), country_language CHAR(50))
)
LOCATION ('Countries1.txt','Countries2.txt')
)
PARALLEL 5
REJECT LIMIT UNLIMITED;

OR

CREATE TABLE Test (
CALL_NUMBER VARCHAR2(30 BYTE), START_TIME_ESEC NUMBER(20), START_TIME_MSEC NUMBER(10) )
ORGANIZATION EXTERNAL
( TYPE ORACLE_LOADER
DEFAULT DIRECTORY TEST
ACCESS PARAMETERS
( RECORDS DELIMITED BY NEWLINE
LOGFILE DATA_PUMP_DIR:'MY_log.log'
FIELDS TERMINATED BY ','
(CALL_NUMBER CHAR(30), START_TIME_ESEC INTEGER EXTERNAL(20), START_TIME_MSEC INTEGER EXTERNAL(10)))
LOCATION ('adr-CEP_SAMPLE_1.csv'))
REJECT LIMIT UNLIMITED
PARALLEL ( DEGREE DEFAULT INSTANCES DEFAULT )
NOMONITORING;
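Once the external table is in place, the upload itself is typically a direct-path parallel insert into the target table; a minimal sketch, assuming a target table COUNTRIES whose columns match COUNTRIES_EXT:

ALTER SESSION ENABLE PARALLEL DML;
INSERT /*+ APPEND PARALLEL(c, 4) */ INTO countries c SELECT * FROM countries_ext;
COMMIT;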



Partition need - 


7. Parallel parameters -
1. parallel_min_servers = 2 * the expected parallel degree
2. parallel_max_servers = do not set explicitly; leave it at the default
3. parallel_execution_message_size = 16K
4. parallel_degree_policy = AUTO
5. parallel_degree_limit = check the documentation for the appropriate value
6. parallel_servers_target = set close to parallel_max_servers, i.e. about 90% of parallel_max_servers.
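These are instance-level parameters; as a sketch, they can be set with ALTER SYSTEM (the values shown are illustrative, not a recommendation for every workload):

ALTER SYSTEM SET parallel_degree_policy = AUTO SCOPE = BOTH;
ALTER SYSTEM SET parallel_execution_message_size = 16384 SCOPE = SPFILE; -- requires an instance restart
ALTER SYSTEM SET parallel_min_servers = 32 SCOPE = BOTH; -- illustrative: 2 x the expected parallel degree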

8. Memory settings - MEMORY_MAX_TARGET, MEMORY_TARGET, PGA_AGGREGATE_TARGET, etc.

9. Use BIGFILE tablespaces.
10. Parallel load uses the direct-path load method by default, so there is no need to worry about the data flushing the buffer cache (SGA).
11. Set extent management to AUTOALLOCATE to avoid wasted space and extent-management overhead.
12. Pre-create the table with large initial extents, and keep extent sizes large afterwards (8M is recommended by Oracle) for better performance; see the sketch after the tablespace example below.

CREATE BIGFILE TABLESPACE user_tbs DATAFILE '/u02/tbs01.dbf' SIZE 300G REUSE AUTOEXTEND ON NEXT 15G MAXSIZE UNLIMITED
LOGGING -- this is the default; consider using NOLOGGING
EXTENT MANAGEMENT LOCAL AUTOALLOCATE -- this is the default
SEGMENT SPACE MANAGEMENT AUTO; -- this is the default
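For point 12, the large initial extent can be requested in the STORAGE clause when the staging table is pre-created; a minimal sketch with placeholder table and column names:

CREATE TABLE temp_sales (sale_id NUMBER, sale_date DATE, amount NUMBER)
TABLESPACE user_tbs
STORAGE (INITIAL 8M NEXT 8M)
NOLOGGING;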

13. ALTER SESSION ENABLE PARALLEL DML;
14. Upload the data into a temporary (staging) table and then swap it into the partitioned table (see steps 15-16).

15. ALTER TABLE sales EXCHANGE PARTITION day_2004_jan1 WITH TABLE temp_sales INCLUDING INDEXES;
ALTER TABLE sales EXCHANGE PARTITION day_2004_jan1 WITH TABLE temp_sales INCLUDING INDEXES WITHOUT VALIDATION;

16. Mark local index partitions unusable before loading a large volume (50% or more additional data) into a partition, then rebuild them afterwards.
ALTER TABLE sales MODIFY PARTITION day_2004_jan1 UNUSABLE LOCAL INDEXES;
ALTER TABLE sales MODIFY PARTITION day_2004_jan1 REBUILD UNUSABLE LOCAL INDEXES;
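Putting steps 13-16 together, a typical load of one new partition looks roughly like this (table, partition and external-table names are placeholders):

ALTER SESSION ENABLE PARALLEL DML;
INSERT /*+ APPEND */ INTO temp_sales SELECT * FROM sales_ext; -- direct-path load into the staging table
COMMIT;
ALTER TABLE sales EXCHANGE PARTITION day_2004_jan1 WITH TABLE temp_sales INCLUDING INDEXES WITHOUT VALIDATION;
ALTER TABLE sales MODIFY PARTITION day_2004_jan1 REBUILD UNUSABLE LOCAL INDEXES;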




=============

Flash Cache enable steps -

Steps to enable the flash cache in Write Back mode -

CellCLI> drop flashcache
CellCLI> alter cell shutdown services cellsrv
CellCLI> alter cell flashCacheMode = WriteBack
CellCLI> alter cell startup services cellsrv
CellCLI> create flashcache all

The Write Back cache mode can be reverted to Write Through mode by first manually flushing all the dirty blocks back to disk:

CellCLI> alter flashcache all flush
CellCLI> drop flashcache
CellCLI> alter cell shutdown services cellsrv
CellCLI> alter cell flashCacheMode=Writethrough
CellCLI> alter cell startup services cellsrv

CellCLI> list flashcache detail
CellCLI> create flashcache all size=100g
CellCLI> list flashcache detail
CellCLI> create griddisk all flashdisk prefix=flashdrive
CellCLI> list griddisk flashdrive_FD_10_exa5cel02 detail
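The flash-based grid disks created above are normally presented to ASM as their own disk group; a hedged sketch, run from an ASM instance rather than CellCLI (disk group name and attribute values are illustrative):

SQL> CREATE DISKGROUP flash_dg NORMAL REDUNDANCY
DISK 'o/*/flashdrive_*'
ATTRIBUTE 'compatible.rdbms' = '11.2.0.0.0', 'compatible.asm' = '11.2.0.0.0', 'cell.smart_scan_capable' = 'TRUE';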




===================

SQL> select status from GV$IO_CALIBRATION_STATUS;
STATUS
------------
NOT AVAILABLE


Then run the DBMS_RESOURCE_MANAGER.CALIBRATE_IO procedure:
SQL> SET SERVEROUTPUT ON
SQL> DECLARE
lat INTEGER;
iops INTEGER;
mbps INTEGER;
BEGIN


-- CALIBRATE_IO (num_physical_disks, max_latency, max_iops OUT, max_mbps OUT, actual_latency OUT)
DBMS_RESOURCE_MANAGER.CALIBRATE_IO (28, 10, iops, mbps, lat);
DBMS_OUTPUT.PUT_LINE ('max_iops = ' || iops);
DBMS_OUTPUT.PUT_LINE ('latency = ' || lat);
DBMS_OUTPUT.PUT_LINE ('max_mbps = ' || mbps);
end;
/

max_iops = 5944
latency = 9
max_mbps = 75

PL/SQL procedure successfully completed.

If DBMS_RESOURCE_MANAGER.CALIBRATE_IO fails with ORA-56708: Could not find any datafiles with asynchronous i/o capability, asynchronous I/O needs to be enabled by setting two values in the init.ora / spfile (an instance restart is required):
disk_asynch_io = true
filesystemio_options = asynch
After CALIBRATE_IO has run, the per-datafile asynchronous I/O setting can be checked with the query at the end of this post.
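As an alternative to editing init.ora directly, the same two parameters can be set through the spfile; a minimal sketch (an instance restart is still required):

ALTER SYSTEM SET disk_asynch_io = TRUE SCOPE = SPFILE;
ALTER SYSTEM SET filesystemio_options = ASYNCH SCOPE = SPFILE;
-- restart the instance, then rerun DBMS_RESOURCE_MANAGER.CALIBRATE_IO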


==


DBMS_RESOURCE_MANAGER.CALIBRATE_IO (
num_physical_disks IN PLS_INTEGER DEFAULT 1,
max_latency IN PLS_INTEGER DEFAULT 20,
max_iops OUT PLS_INTEGER,
max_mbps OUT PLS_INTEGER,
actual_latency OUT PLS_INTEGER);
col name format a50
SELECT name, asynch_io FROM v$datafile f,v$iostat_file i
WHERE f.file# = i.file_no
AND filetype_name = 'Data File';
