Lots of times, when I’m testing theories, I need a large test table to see how things scale. Here’s a script that will quickly create such a table with a primary key/unique index, along with a bitmap index. The table has 10 partitions, which is what allows 10 concurrent “append” inserts: a direct-path insert locks the segment it writes into, so each concurrent job needs its own partition. DBMS_PARALLEL_EXECUTE is used to build the table in parallel, and DBMS_PCLXUTIL is used to build the bitmap index in parallel.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
WARNING! DON’T run this on your production system.
It consumes a *LOT* of CPU, a *LOT* of physical disk I/O, and a good amount of disk space (on my system, the combined table/index size is 46.9 GB).
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
“Fast” is, of course, a relative term; the run time will vary widely based on your hardware.
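The chunk SQL at the heart of the script is worth seeing in isolation: it just generates ten 100-million-wide ID ranges, one per partition, which DBMS_PARALLEL_EXECUTE hands out to the jobs. You can run it by itself (it's pulled straight from the script below) to see exactly what each job will receive:

select (rownum - 1) * 100000000 start_id
    , (rownum * 100000000) - 1 end_id
from dual
connect by level <= 10;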
define v_table_name = drop_me

create table &v_table_name
(
    id                number(*,0)   constraint pk_&v_table_name. primary key
    , low_cardinality varchar2(10)
    , data            varchar2(500) constraint nn_data not null
)
pctfree 0
storage (initial 50m)
nologging
partition by range (id)
(
    partition P_0_to_99999999            values less than (100000000)  segment creation immediate
    , partition P_100000000_to_199999999 values less than (200000000)  segment creation immediate
    , partition P_200000000_to_299999999 values less than (300000000)  segment creation immediate
    , partition P_300000000_to_399999999 values less than (400000000)  segment creation immediate
    , partition P_400000000_to_499999999 values less than (500000000)  segment creation immediate
    , partition P_500000000_to_599999999 values less than (600000000)  segment creation immediate
    , partition P_600000000_to_699999999 values less than (700000000)  segment creation immediate
    , partition P_700000000_to_799999999 values less than (800000000)  segment creation immediate
    , partition P_800000000_to_899999999 values less than (900000000)  segment creation immediate
    , partition P_900000000_to_999999999 values less than (1000000000) segment creation immediate
)
;

set serveroutput on

declare
    v_task_name sys.all_scheduler_jobs.job_name%type :=
        dbms_parallel_execute.generate_task_name(substr('&v_table_name.', 1, 18));
begin
    dbms_parallel_execute.create_task(task_name => v_task_name);

    -- One chunk per partition: ten 100-million-wide ID ranges.
    dbms_parallel_execute.create_chunks_by_sql(
        task_name => v_task_name
        , sql_stmt => q'<
            select (rownum - 1) * 100000000 start_id
                , (rownum * 100000000) - 1 end_id
            from dual
            connect by level <= 10
        >'
        , by_rowid => false
    );

    -- Each job direct-path inserts into its own partition.
    dbms_parallel_execute.run_task(
        task_name => v_task_name
        , sql_stmt => q'<
declare
    v_start_id       number := :start_id;
    v_end_id         number := :end_id;
    v_sql            varchar2(5000);
    v_partition_name user_tab_partitions.partition_name%type;
begin
    case v_start_id
        when 0         then v_partition_name := 'P_0_to_99999999';
        when 100000000 then v_partition_name := 'P_100000000_to_199999999';
        when 200000000 then v_partition_name := 'P_200000000_to_299999999';
        when 300000000 then v_partition_name := 'P_300000000_to_399999999';
        when 400000000 then v_partition_name := 'P_400000000_to_499999999';
        when 500000000 then v_partition_name := 'P_500000000_to_599999999';
        when 600000000 then v_partition_name := 'P_600000000_to_699999999';
        when 700000000 then v_partition_name := 'P_700000000_to_799999999';
        when 800000000 then v_partition_name := 'P_800000000_to_899999999';
        when 900000000 then v_partition_name := 'P_900000000_to_999999999';
        else raise_application_error(-20100, 'Invalid partition =(');
    end case;

    v_sql := q'[
        insert /*+ append */ into drop_me partition (]' || v_partition_name || q'[)
        with generator as
        (
            select rownum x
            from dual
            connect by level <= 3200
        )
        select (rownum - 1) + ]' || v_start_id || q'[
            , rpad(chr(mod(rownum - 1, 26) + 65), 5, chr(mod(rownum - 1, 26) + 65))
            , rpad('X', 400, 'X')
        from generator a
        cross join generator b
        where rownum <= 100000000
    ]';

    execute immediate v_sql;
end;>'
        , language_flag  => dbms_sql.native
        , parallel_level => 10
    );

    -- Poll until the task reaches a terminal status.
    while dbms_parallel_execute.task_status(v_task_name) not in
        (
            dbms_parallel_execute.finished
            , dbms_parallel_execute.crashed
            , dbms_parallel_execute.chunking_failed
            , dbms_parallel_execute.finished_with_error
        )
    loop
        -- Sleep 3 seconds between polls
        dbms_lock.sleep(3);
    end loop;

    case dbms_parallel_execute.task_status(v_task_name)
        when dbms_parallel_execute.finished then
            dbms_output.put_line('Job finished successfully');
        when dbms_parallel_execute.crashed then
            dbms_output.put_line('Job crashed');
        when dbms_parallel_execute.chunking_failed then
            dbms_output.put_line('Chunking failed');
        when dbms_parallel_execute.finished_with_error then
            dbms_output.put_line('Finished with error =(');
    end case;

    if dbms_parallel_execute.task_status(v_task_name) = dbms_parallel_execute.finished then
        dbms_parallel_execute.drop_task(v_task_name);
    end if;
end;
/
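-- Optional: while the task above is running, you can watch its progress from
-- a second session. DBMS_PARALLEL_EXECUTE tracks each chunk's state in the
-- USER_PARALLEL_EXECUTE_CHUNKS view; a minimal monitoring query (left
-- commented out here so the script itself is unchanged) looks like this:
--
--     select status, count(*) chunk_count
--     from user_parallel_execute_chunks
--     group by status;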
-- Write all dirty buffers out to disk
alter system checkpoint;

-- Build a bitmap index on the low-cardinality column in parallel
create bitmap index &v_table_name._bidx on &v_table_name (low_cardinality) local unusable;

declare
    CONST_NUMBER_OF_PARTITIONS constant number := 10;
    v_usable_count             number;
    v_keep_polling             boolean := true;
begin
    -- Rebuild the unusable local index partitions, one job per partition.
    dbms_pclxutil.build_part_index(
        jobs_per_batch  => CONST_NUMBER_OF_PARTITIONS
        , procs_per_job => 1
        , tab_name      => upper('&v_table_name.')
        , idx_name      => upper('&v_table_name._bidx')
        , force_opt     => false
    );

    -- Poll until every index partition is usable.
    while v_keep_polling
    loop
        dbms_lock.sleep(3);

        select count(*)
        into v_usable_count
        from user_ind_partitions
        where index_name = upper('&v_table_name._bidx')
            and status = 'USABLE';

        if v_usable_count = CONST_NUMBER_OF_PARTITIONS then
            v_keep_polling := false;
        end if;
    end loop;
end;
/

-- Again, write all dirty buffers out to disk
alter system checkpoint;

-- Gather stats, no histograms, in parallel 10
exec dbms_stats.gather_table_stats(ownname => user, tabname => upper('&v_table_name.'), cascade => true, degree => 10, method_opt => 'for all columns size 1');
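Once everything finishes, a couple of quick sanity checks confirm the row count and the on-disk footprint. This is just a sketch, and it assumes the default table name (drop_me) and the index/constraint names the script derives from it:

-- Rows loaded: ten chunks of a 3200 x 3200 generator cross join = 102,400,000
select count(*) from drop_me;

-- Combined on-disk size of the table, primary key index, and bitmap index
select segment_name
    , round(sum(bytes) / 1024 / 1024 / 1024, 1) size_gb
from user_segments
where segment_name in ('DROP_ME', 'PK_DROP_ME', 'DROP_ME_BIDX')
group by segment_name;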