Columns (name, type, observed value range):
  filename         string, lengths 78-241
  omp_pragma_line  string, lengths 24-416
  context_chars    int64, always 100
  text             string, lengths 152-177k
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c
#pragma omp parallel for private (pidx) schedule(dynamic)
100
posit = calc_deposit (); /* Scan through zones and add appropriate deposit to each zone */ <LOOP-START>for (pidx = 0; pidx < CLOMP_numParts; pidx++) update_part (partArray[pidx], deposit); /* ---------------- SUBCYCLE 3 OF 4 ----------------- */ /* Calculate deposit for this subcycle based on last subcycle's residue */ deposit = calc_deposit (); /* Scan through zones and add appropriate deposit to each zone */ #pragma omp parallel for private (pidx) schedule(dynamic) for (pidx = 0; pidx < CLOMP_numParts; pidx++) update_part (partArray[pidx], deposit); /* ---------------- SUBCYCLE 4 OF 4 ----------------- */ /* Calculate deposit for this subcycle based on last subcycle's residue */ deposit = calc_deposit (); /* Scan through zones and add appropriate deposit to each zone */ #pragma omp parallel for private (pidx) schedule(dynamic) for (pidx = 0; pidx < CLOMP_numParts; pidx++) update_part (partArray[pidx], deposit); } /* Do one cycle (10 subcycles) using "omp parallel for schedule(dynamic)" */ void dynamic_omp_cycle() { /* Emulate calls to 4 different packages, do 10 subcycles total */ dynamic_omp_module1(); dynamic_omp_module2(); dynamic_omp_module3(); dynamic_omp_module4(); }<LOOP-END> <OMP-START>#pragma omp parallel for private (pidx) schedule(dynamic)<OMP-END>
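The CLOMP rows above attach schedule(dynamic) to a loop over independent parts. A minimal compilable sketch of the same pattern follows; the Part stand-in, update_part(), and NUM_PARTS are hypothetical, not the CLOMP originals. Compile with an OpenMP flag such as -fopenmp (GCC/Clang).

```c
#include <stdio.h>

#define NUM_PARTS 64

static double part_value[NUM_PARTS];

/* Hypothetical stand-in for CLOMP's update_part(): add a deposit to one part. */
static void update_part(int pidx, double deposit)
{
    part_value[pidx] += deposit;
}

int main(void)
{
    double deposit = 1.0;
    int pidx;

    /* Iterations are independent, so they can be handed out dynamically to
     * balance uneven per-part work; private(pidx) is redundant (the loop
     * index of a parallel for is private anyway) but matches the rows above. */
    #pragma omp parallel for private(pidx) schedule(dynamic)
    for (pidx = 0; pidx < NUM_PARTS; pidx++)
        update_part(pidx, deposit);

    printf("part_value[0] = %f\n", part_value[0]);
    return 0;
}
```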
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c
#pragma omp parallel for private (pidx) schedule(dynamic)
100
posit = calc_deposit (); /* Scan through zones and add appropriate deposit to each zone */ <LOOP-START>for (pidx = 0; pidx < CLOMP_numParts; pidx++) update_part (partArray[pidx], deposit); /* ---------------- SUBCYCLE 4 OF 4 ----------------- */ /* Calculate deposit for this subcycle based on last subcycle's residue */ deposit = calc_deposit (); /* Scan through zones and add appropriate deposit to each zone */ #pragma omp parallel for private (pidx) schedule(dynamic) for (pidx = 0; pidx < CLOMP_numParts; pidx++) update_part (partArray[pidx], deposit); } /* Do one cycle (10 subcycles) using "omp parallel for schedule(dynamic)" */ void dynamic_omp_cycle() { /* Emulate calls to 4 different packages, do 10 subcycles total */ dynamic_omp_module1(); dynamic_omp_module2(); dynamic_omp_module3(); dynamic_omp_module4(); }<LOOP-END> <OMP-START>#pragma omp parallel for private (pidx) schedule(dynamic)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c
#pragma omp parallel for private (pidx) schedule(dynamic)
100
posit = calc_deposit (); /* Scan through zones and add appropriate deposit to each zone */ <LOOP-START>for (pidx = 0; pidx < CLOMP_numParts; pidx++) update_part (partArray[pidx], deposit); } /* Do one cycle (10 subcycles) using "omp parallel for schedule(dynamic)" */ void dynamic_omp_cycle() { /* Emulate calls to 4 different packages, do 10 subcycles total */ dynamic_omp_module1(); dynamic_omp_module2(); dynamic_omp_module3(); dynamic_omp_module4(); }<LOOP-END> <OMP-START>#pragma omp parallel for private (pidx) schedule(dynamic)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c
#pragma omp parallel for private(partId) schedule(static)
100
may be set to 1 for allocate) * to allow potentially better memory layout for threads */ <LOOP-START>for (partId = 0; partId < CLOMP_numParts; partId++) { Part *part; if ((part= (Part *) malloc (sizeof (Part))) == NULL) { fprintf (stderr, "Out of memory allocating part\n"); exit (1); } /* Call standard part initializer for part just allocated. * Allows parts to be allocated as desired. */ addPart(part, partId); }<LOOP-END> <OMP-START>#pragma omp parallel for private(partId) schedule(static) <OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c
#pragma omp parallel for private(partId) schedule(static)
100
rt just allocated. * Allows parts to be allocated as desired. */ addPart(part, partId); } <LOOP-START>/* Create and add zones to parts. * Do allocations in thread (allocThreads may be set to 1 for allocate) * to allow potentially better memory layout for threads */ for (partId = 0; partId < CLOMP_numParts; partId++) { Zone *zoneArray, *zone; int zoneId; /* Allocate an array of zones for this part */ zoneArray = (Zone *)malloc (CLOMP_zoneSize * CLOMP_zonesPerPart); if (zoneArray == NULL) { fprintf (stderr, "Out of memory allocate zone array\n"); exit (1); } /* Put all zones into part's zone linked list */ for (zoneId = 0; zoneId < CLOMP_zonesPerPart; zoneId++) { /* Get the current zone being placed */ zone = &zoneArray[zoneId]; /* Add it to the end of the the part */ addZone (partArray[partId], zone); } #if 0 /* Print out memory address for zoneArray to see where it maps */ printf ("Part %i threadId %i: zones %p - %p\n", (int)partId, omp_get_thread_num(), zoneArray, &zoneArray[CLOMP_zonesPerPart-1]); }<LOOP-END> <OMP-START>#pragma omp parallel for private(partId) schedule(static) <OMP-END>
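The two CLOMP setup rows above allocate parts (and zones) inside a schedule(static) loop so that, with several allocating threads, memory can end up closer to the thread that will later use it (first-touch). A reduced sketch, with a hypothetical one-field Part in place of the CLOMP struct:

```c
#include <stdio.h>
#include <stdlib.h>

#define NUM_PARTS 16

/* Hypothetical one-field Part; the CLOMP struct has many more fields. */
typedef struct { int id; } Part;

static Part *partArray[NUM_PARTS];

int main(void)
{
    int partId;

    /* Static schedule so each thread allocates the parts it will later work
     * on, which can give a friendlier first-touch memory layout. */
    #pragma omp parallel for private(partId) schedule(static)
    for (partId = 0; partId < NUM_PARTS; partId++) {
        Part *part = malloc(sizeof(Part));
        if (part == NULL) {
            fprintf(stderr, "Out of memory allocating part\n");
            exit(1);
        }
        part->id = partId;
        partArray[partId] = part;
    }

    printf("partArray[0]->id = %d\n", partArray[0]->id);
    return 0;
}
```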
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c
#pragma omp parallel for private (pidx) schedule(static)");
100
print_pseudocode ("Static OMP", "deposit = calc_deposit ();"); print_pseudocode ("Static OMP", "<LOOP-START>print_pseudocode ("Static OMP", "for (pidx = 0; pidx lt numParts; pidx++)"); print_pseudocode ("Static OMP", " update_part (partArray[pidx], deposit);"); print_pseudocode ("Static OMP", "------- End Static OMP Pseudocode -------"); print_start_message ("Static OMP"); #ifdef WITH_MPI /* Ensure all MPI tasks run OpenMP at the same time */ MPI_Barrier (MPI_COMM_WORLD); get_timestamp (&static_omp_start_ts); do_static_omp_version(); get_timestamp (&static_omp_end_ts); /* Check data for consistency and print out data stats*/ print_data_stats ("Static OMP"); /* Print out serial time stats and capture time. * Also print speedup compared to serial run time. */ static_omp_seconds = print_timestats ("Static OMP", &static_omp_start_ts, &static_omp_end_ts, serial_ref_seconds, bestcase_omp_seconds); /* --------- Start Dynamic OMP benchmark measurement --------- */ /* Do one cycle outside timer loop to warm up code and (for OpenMP cases) * allow the OpenMP system to initialize (which can be expensive, skewing * the measurments for small runtimes */ reinitialize_parts(); dynamic_omp_cycle(); /* Reinitialize parts and warm up cache by doing dummy update */ reinitialize_parts(); /* Do the OMP Dynamic OMP version of calculation and measure time*/ print_pseudocode ("Dynamic OMP", "------ Start Dynamic OMP Pseudocode ------"); print_pseudocode ("Dynamic OMP", "/* Use OpenMP parallel for schedule(dynamic) on orig loop. */"); print_pseudocode ("Dynamic OMP", "deposit = calc_deposit ();"); print_pseudocode ("Dynamic OMP", "#pragma omp parallel for private (pidx) schedule(dynamic)"); print_pseudocode ("Dynamic OMP", "for (pidx = 0; pidx lt numParts; pidx++)"); print_pseudocode ("Dynamic OMP", " update_part (partArray[pidx], deposit);"); print_pseudocode ("Dynamic OMP", "------- End Dynamic OMP Pseudocode -------"); print_start_message ("Dynamic OMP"); #ifdef WITH_MPI /* Ensure all MPI tasks run OpenMP at the same time */ MPI_Barrier (MPI_COMM_WORLD); get_timestamp (&dynamic_omp_start_ts); do_dynamic_omp_version(); get_timestamp (&dynamic_omp_end_ts); /* Check data for consistency and print out data stats*/ print_data_stats ("Dynamic OMP"); /* Print out serial time stats and capture time. * Also print speedup compared to serial run time. */ dynamic_omp_seconds = print_timestats ("Dynamic OMP", &dynamic_omp_start_ts, &dynamic_omp_end_ts, serial_ref_seconds, bestcase_omp_seconds); /* --------- Start Manual OMP benchmark measurement --------- */ /* Do one cycle outside timer loop to warm up code and (for OpenMP cases) * allow the OpenMP system to initialize (which can be expensive, skewing * the measurments for small runtimes */ reinitialize_parts(); do_manual_omp_version(1); /* Reinitialize parts and warm up cache by doing dummy update */ reinitialize_parts(); /* Do the OMP Manual OMP version of calculation and measure time*/ print_pseudocode ("Manual OMP", "------ Start Manual OMP Pseudocode ------"); print_pseudocode ("Manual OMP", "/* At top level, spawn threads and manually partition parts*/"); print_pseudocode ("Manual OMP", "#pragma omp parallel"); print_pseudocode ("Manual OMP", "{"); print_pseudocode ("Manual OMP", " int startPidx = ... 
/* slice based on thread_id*/"); print_pseudocode ("Manual OMP", " for (iter = 0; iter lt num_iterations; iter++) "); print_pseudocode ("Manual OMP", " do_iter(startPidx, endPidx);"); print_pseudocode ("Manual OMP", "}<LOOP-END> <OMP-START>#pragma omp parallel for private (pidx) schedule(static)");<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/CLOMP/clomp.c
#pragma omp parallel for private (pidx) schedule(dynamic)");
100
int_pseudocode ("Dynamic OMP", "deposit = calc_deposit ();"); print_pseudocode ("Dynamic OMP", "<LOOP-START>print_pseudocode ("Dynamic OMP", "for (pidx = 0; pidx lt numParts; pidx++)"); print_pseudocode ("Dynamic OMP", " update_part (partArray[pidx], deposit);"); print_pseudocode ("Dynamic OMP", "------- End Dynamic OMP Pseudocode -------"); print_start_message ("Dynamic OMP"); #ifdef WITH_MPI /* Ensure all MPI tasks run OpenMP at the same time */ MPI_Barrier (MPI_COMM_WORLD); get_timestamp (&dynamic_omp_start_ts); do_dynamic_omp_version(); get_timestamp (&dynamic_omp_end_ts); /* Check data for consistency and print out data stats*/ print_data_stats ("Dynamic OMP"); /* Print out serial time stats and capture time. * Also print speedup compared to serial run time. */ dynamic_omp_seconds = print_timestats ("Dynamic OMP", &dynamic_omp_start_ts, &dynamic_omp_end_ts, serial_ref_seconds, bestcase_omp_seconds); /* --------- Start Manual OMP benchmark measurement --------- */ /* Do one cycle outside timer loop to warm up code and (for OpenMP cases) * allow the OpenMP system to initialize (which can be expensive, skewing * the measurments for small runtimes */ reinitialize_parts(); do_manual_omp_version(1); /* Reinitialize parts and warm up cache by doing dummy update */ reinitialize_parts(); /* Do the OMP Manual OMP version of calculation and measure time*/ print_pseudocode ("Manual OMP", "------ Start Manual OMP Pseudocode ------"); print_pseudocode ("Manual OMP", "/* At top level, spawn threads and manually partition parts*/"); print_pseudocode ("Manual OMP", "#pragma omp parallel"); print_pseudocode ("Manual OMP", "{"); print_pseudocode ("Manual OMP", " int startPidx = ... /* slice based on thread_id*/"); print_pseudocode ("Manual OMP", " for (iter = 0; iter lt num_iterations; iter++) "); print_pseudocode ("Manual OMP", " do_iter(startPidx, endPidx);"); print_pseudocode ("Manual OMP", "}<LOOP-END> <OMP-START>#pragma omp parallel for private (pidx) schedule(dynamic)");<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c
#pragma omp parallel for schedule(static) ordered
100
Schedule static\n#################################################\n"); cnt_static = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_static (j, "static"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) ordered<OMP-END>
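The ordered_* rows that follow all share one shape: the loop directive carries both a schedule clause and `ordered`, and the body wraps its check in an `#pragma omp ordered` block so the checks still run in iteration order. A minimal sketch with a hypothetical in-place check instead of the benchmark's check_static():

```c
#include <stdio.h>

int main(void)
{
    const int init = 0, size = 16;
    int cnt = init;
    int j;

    /* The loop directive must carry the `ordered` clause; the ordered block
     * inside then runs in iteration order even though the surrounding
     * iterations execute in parallel. */
    #pragma omp parallel for schedule(static) ordered
    for (j = init; j < size; j++) {
        #pragma omp ordered
        {
            /* stands in for the benchmark's check_static(j, "static") */
            if (j != cnt)
                printf("out of order: got %d expected %d\n", j, cnt);
            cnt++;
        }
    }
    printf("final count: %d\n", cnt);
    return 0;
}
```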
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c
#pragma omp parallel for schedule(static, cs) ordered
100
(%d)\n#################################################\n", cs); cnt_static_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_static_chunked (j, "static chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c
#pragma omp parallel for schedule(dynamic) ordered
100
chedule dynamic\n#################################################\n"); cnt_dynamic = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_dynamic (j, "dynamic"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c
#pragma omp parallel for schedule(dynamic, cs) ordered
100
%d)\n#################################################\n", cs); cnt_dynamic_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_dynamic_chunked (j, "dynamic chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c
#pragma omp parallel for schedule(guided) ordered
100
Schedule guided\n#################################################\n"); cnt_guided = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_guided (j, "guided"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c
#pragma omp parallel for schedule(guided, cs) ordered
100
(%d)\n#################################################\n", cs); cnt_guided_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_guided_chunked (j, "guided chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c
#pragma omp parallel for schedule(auto) ordered
100
ered Schedule auto\n#################################################\n"); cnt_auto = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_auto (j, "auto"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(auto) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8u.c
#pragma omp parallel for schedule(runtime) ordered
100
chedule runtime\n#################################################\n"); cnt_runtime = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_runtime (j, "runtime"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(runtime) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c
#pragma omp parallel for schedule(static) ordered
100
Schedule static\n#################################################\n"); cnt_static = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_static (j, "static"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c
#pragma omp parallel for schedule(static, cs) ordered
100
(%d)\n#################################################\n", cs); cnt_static_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_static_chunked (j, "static chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c
#pragma omp parallel for schedule(dynamic) ordered
100
chedule dynamic\n#################################################\n"); cnt_dynamic = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_dynamic (j, "dynamic"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c
#pragma omp parallel for schedule(dynamic, cs) ordered
100
%d)\n#################################################\n", cs); cnt_dynamic_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_dynamic_chunked (j, "dynamic chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c
#pragma omp parallel for schedule(guided) ordered
100
Schedule guided\n#################################################\n"); cnt_guided = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_guided (j, "guided"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c
#pragma omp parallel for schedule(guided, cs) ordered
100
(%d)\n#################################################\n", cs); cnt_guided_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_guided_chunked (j, "guided chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c
#pragma omp parallel for schedule(auto) ordered
100
ered Schedule auto\n#################################################\n"); cnt_auto = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_auto (j, "auto"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(auto) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4.c
#pragma omp parallel for schedule(runtime) ordered
100
chedule runtime\n#################################################\n"); cnt_runtime = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_runtime (j, "runtime"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(runtime) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c
#pragma omp parallel for schedule(static) ordered
100
Schedule static\n#################################################\n"); cnt_static = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_static (j, "static"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c
#pragma omp parallel for schedule(static, cs) ordered
100
(%d)\n#################################################\n", cs); cnt_static_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_static_chunked (j, "static chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c
#pragma omp parallel for schedule(dynamic) ordered
100
chedule dynamic\n#################################################\n"); cnt_dynamic = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_dynamic (j, "dynamic"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c
#pragma omp parallel for schedule(dynamic, cs) ordered
100
%d)\n#################################################\n", cs); cnt_dynamic_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_dynamic_chunked (j, "dynamic chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c
#pragma omp parallel for schedule(guided) ordered
100
Schedule guided\n#################################################\n"); cnt_guided = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_guided (j, "guided"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c
#pragma omp parallel for schedule(guided, cs) ordered
100
(%d)\n#################################################\n", cs); cnt_guided_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_guided_chunked (j, "guided chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c
#pragma omp parallel for schedule(auto) ordered
100
ered Schedule auto\n#################################################\n"); cnt_auto = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_auto (j, "auto"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(auto) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_4u.c
#pragma omp parallel for schedule(runtime) ordered
100
chedule runtime\n#################################################\n"); cnt_runtime = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_runtime (j, "runtime"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(runtime) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c
#pragma omp parallel for schedule(static) ordered
100
Schedule static\n#################################################\n"); cnt_static = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_static (j, "static"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c
#pragma omp parallel for schedule(static, cs) ordered
100
(%d)\n#################################################\n", cs); cnt_static_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_static_chunked (j, "static chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c
#pragma omp parallel for schedule(dynamic) ordered
100
chedule dynamic\n#################################################\n"); cnt_dynamic = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_dynamic (j, "dynamic"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c
#pragma omp parallel for schedule(dynamic, cs) ordered
100
%d)\n#################################################\n", cs); cnt_dynamic_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_dynamic_chunked (j, "dynamic chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c
#pragma omp parallel for schedule(guided) ordered
100
Schedule guided\n#################################################\n"); cnt_guided = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_guided (j, "guided"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c
#pragma omp parallel for schedule(guided, cs) ordered
100
(%d)\n#################################################\n", cs); cnt_guided_chunked = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_guided_chunked (j, "guided chunked"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided, cs) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c
#pragma omp parallel for schedule(auto) ordered
100
ered Schedule auto\n#################################################\n"); cnt_auto = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_auto (j, "auto"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(auto) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/simple/ordered_8.c
#pragma omp parallel for schedule(runtime) ordered
100
chedule runtime\n#################################################\n"); cnt_runtime = init; <LOOP-START>for (j = init; j < size; j++) { #pragma omp ordered { check_runtime (j, "runtime"); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(runtime) ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/NAS/IS/is.c
#pragma omp parallel for
100
; i++) { bucket_size[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * NUM_BUCKETS); } <LOOP-START>for( i=0; i<NUM_KEYS; i++ ) key_buff2[i] = 0; #else /*USE_BUCKETS*/ key_buff1_aptr = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs); key_buff1_aptr[0] = key_buff1; for (i = 1; i < num_procs; i++) { key_buff1_aptr[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * MAX_KEY); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/NAS/IS/is.c
#pragma omp parallel for private(i,j,k,k1) schedule(static,1)
100
ETS /* Buckets are already sorted. Sorting keys within each bucket */ #ifdef SCHED_CYCLIC <LOOP-START>#else #pragma omp parallel for private(i,j,k,k1) schedule(dynamic) for( j=0; j< NUM_BUCKETS; j++ ) { k1 = (j > 0)? bucket_ptrs[j-1] : 0; for ( i = k1; i < bucket_ptrs[j]; i++ ) { k = --key_buff_ptr_global[key_buff2[i]]; key_array[k] = key_buff2[i]; } }<LOOP-END> <OMP-START>#pragma omp parallel for private(i,j,k,k1) schedule(static,1)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/NAS/IS/is.c
#pragma omp parallel for private(i,j,k,k1) schedule(dynamic)
100
*/ #ifdef SCHED_CYCLIC #pragma omp parallel for private(i,j,k,k1) schedule(static,1) #else <LOOP-START>for( j=0; j< NUM_BUCKETS; j++ ) { k1 = (j > 0)? bucket_ptrs[j-1] : 0; for ( i = k1; i < bucket_ptrs[j]; i++ ) { k = --key_buff_ptr_global[key_buff2[i]]; key_array[k] = key_buff2[i]; } }<LOOP-END> <OMP-START>#pragma omp parallel for private(i,j,k,k1) schedule(dynamic)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/OpenMP/NAS/IS/is.c
#pragma omp parallel for reduction(+:j)
100
endif /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */ j = 0; <LOOP-START>for( i=1; i<NUM_KEYS; i++ ) if( key_array[i-1] > key_array[i] ) j++; if( j != 0 ) printf( "Full_verify: number of keys out of sort: %ld\n", (long)j ); else passed_verification++; } /*****************************************************************/ /************* R A N K ****************/ /*****************************************************************/ void rank( int iteration ) { INT_TYPE i, k; INT_TYPE *key_buff_ptr, *key_buff_ptr2; #ifdef USE_BUCKETS int shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2; INT_TYPE num_bucket_keys = (1L << shift); key_array[iteration] = iteration; key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration; /* Determine where the partial verify test keys are, load into */ /* top of array bucket_size */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) partial_verify_vals[i] = key_array[test_index_array[i]]; /* Setup pointers to key buffers */ #ifdef USE_BUCKETS key_buff_ptr2 = key_buff2; #else key_buff_ptr2 = key_array; key_buff_ptr = key_buff1; #pragma omp parallel private(i, k) { INT_TYPE *work_buff, m, k1, k2; int myid = 0, num_procs = 1; #ifdef _OPENMP myid = omp_get_thread_num(); num_procs = omp_get_num_threads(); /* Bucket sort is known to improve cache performance on some */ /* cache based systems. But the actual performance may depend */ /* on cache size, problem size. */ #ifdef USE_BUCKETS work_buff = bucket_size[myid]; /* Initialize */ for( i=0; i<NUM_BUCKETS; i++ ) work_buff[i] = 0; /* Determine the number of keys in each bucket */ #pragma omp for schedule(static) for( i=0; i<NUM_KEYS; i++ ) work_buff[key_array[i] >> shift]++; /* Accumulative bucket sizes are the bucket pointers. These are global sizes accumulated upon to each bucket */ bucket_ptrs[0] = 0; for( k=0; k< myid; k++ ) bucket_ptrs[0] += bucket_size[k][0]; for( i=1; i< NUM_BUCKETS; i++ ) { bucket_ptrs[i] = bucket_ptrs[i-1]; for( k=0; k< myid; k++ ) bucket_ptrs[i] += bucket_size[k][i]; for( k=myid; k< num_procs; k++ ) bucket_ptrs[i] += bucket_size[k][i-1]; } /* Sort into appropriate bucket */ #pragma omp for schedule(static) for( i=0; i<NUM_KEYS; i++ ) { k = key_array[i]; key_buff2[bucket_ptrs[k >> shift]++] = k; } /* The bucket pointers now point to the final accumulated sizes */ if (myid < num_procs-1) { for( i=0; i< NUM_BUCKETS; i++ ) for( k=myid+1; k< num_procs; k++ ) bucket_ptrs[i] += bucket_size[k][i]; } /* Now, buckets are sorted. We only need to sort keys inside each bucket, which can be done in parallel. Because the distribution of the number of keys in the buckets is Gaussian, the use of a dynamic schedule should improve load balance, thus, performance */ #ifdef SCHED_CYCLIC #pragma omp for schedule(static,1) #else #pragma omp for schedule(dynamic) for( i=0; i< NUM_BUCKETS; i++ ) { /* Clear the work array section associated with each bucket */ k1 = i * num_bucket_keys; k2 = k1 + num_bucket_keys; for ( k = k1; k < k2; k++ ) key_buff_ptr[k] = 0; /* Ranking of all keys occurs in this section: */ /* In this section, the keys themselves are used as their own indexes to determine how many of each there are: their individual population */ m = (i > 0)? 
bucket_ptrs[i-1] : 0; for ( k = m; k < bucket_ptrs[i]; k++ ) key_buff_ptr[key_buff_ptr2[k]]++; /* Now they have individual key */ /* population */ /* To obtain ranks of each key, successively add the individual key population, not forgetting to add m, the total of lesser keys, to the first key population */ key_buff_ptr[k1] += m; for ( k = k1+1; k < k2; k++ ) key_buff_ptr[k] += key_buff_ptr[k-1]; } #else /*USE_BUCKETS*/ work_buff = key_buff1_aptr[myid]; /* Clear the work array */ for( i=0; i<MAX_KEY; i++ ) work_buff[i] = 0; /* Ranking of all keys occurs in this section: */ /* In this section, the keys themselves are used as their own indexes to determine how many of each there are: their individual population */ #pragma omp for nowait schedule(static) for( i=0; i<NUM_KEYS; i++ ) work_buff[key_buff_ptr2[i]]++; /* Now they have individual key */ /* population */ /* To obtain ranks of each key, successively add the individual key population */ for( i=0; i<MAX_KEY-1; i++ ) work_buff[i+1] += work_buff[i]; #pragma omp barrier /* Accumulate the global key population */ for( k=1; k<num_procs; k++ ) { #pragma omp for nowait schedule(static) for( i=0; i<MAX_KEY; i++ ) key_buff_ptr[i] += key_buff1_aptr[k][i]; } /*USE_BUCKETS*/ } /*omp parallel*/ /* This is the partial verify test section */ /* Observe that test_rank_array vals are */ /* shifted differently for different cases */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) { k = partial_verify_vals[i]; /* test vals were put here */ if( 0 < k && k <= NUM_KEYS-1 ) { INT_TYPE key_rank = key_buff_ptr[k-1]; int failed = 0; switch( CLASS ) { case 'S': if( i <= 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'W': if( i < 2 ) { if( key_rank != test_rank_array[i]+(iteration-2) ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'A': if( i <= 2 ) { if( key_rank != test_rank_array[i]+(iteration-1) ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-(iteration-1) ) failed = 1; else passed_verification++; } break; case 'B': if( i == 1 || i == 2 || i == 4 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'C': if( i <= 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'D': if( i < 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; } if( failed == 1 ) printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, (int)i ); } } /* Make copies of rank info for use by full_verify: these variables in rank are local; making them global slows down the code, probably since they cannot be made register by compiler */ if( iteration == MAX_ITERATIONS ) key_buff_ptr_global = key_buff_ptr; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:j)<OMP-END>
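The NAS IS row above verifies sortedness with `reduction(+:j)`: each thread counts out-of-order adjacent pairs in its chunk and the reduction adds the partial counts. A small self-contained sketch; the array size and the planted out-of-order key are arbitrary:

```c
#include <stdio.h>

#define NUM_KEYS 1000

int main(void)
{
    int key_array[NUM_KEYS];
    long j = 0;
    int i;

    for (i = 0; i < NUM_KEYS; i++)
        key_array[i] = i;      /* sorted... */
    key_array[500] = 0;        /* ...except for one planted inversion */

    /* Each thread counts inversions in its chunk; the reduction adds them up. */
    #pragma omp parallel for reduction(+:j)
    for (i = 1; i < NUM_KEYS; i++)
        if (key_array[i - 1] > key_array[i])
            j++;

    if (j != 0)
        printf("Full_verify: number of keys out of sort: %ld\n", j);
    else
        printf("keys are sorted\n");
    return 0;
}
```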
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/Threads/tbb/src/test/test_openmp.cpp
#pragma omp parallel for reduction(+:sum)
100
int start = i<n ? 0 : i-n+1; int finish = i<m ? i+1 : m; T sum = 0; <LOOP-START>for( int j=start; j<finish; ++j ) sum += my_a[j]*my_b[i-j]; my_c[i] = sum; } } }; //! Test TBB loop around OpenMP loop void TBB_OpenMP_Convolve( T c[], const T a[], int m, const T b[], int n ) { REMARK("testing TBB loop around OpenMP loop\n"); parallel_for( blocked_range<int>(0,m+n-1,10), OuterBody( c, a, m, b, n ) ); }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:sum)<OMP-END>
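The TBB test row nests an OpenMP reduction loop inside a TBB parallel_for (C++). Only the inner OpenMP part is sketched here, in C: one output element of the convolution is a dot product accumulated with reduction(+:sum). The array values in main are arbitrary.

```c
#include <stdio.h>

/* Inner convolution step only (the TBB outer loop is omitted): c[i] is a
 * dot product that the reduction clause accumulates across threads. */
static double convolve_one(const double a[], int m, const double b[], int n, int i)
{
    int start = i < n ? 0 : i - n + 1;
    int finish = i < m ? i + 1 : m;
    double sum = 0.0;
    int j;

    #pragma omp parallel for reduction(+:sum)
    for (j = start; j < finish; j++)
        sum += a[j] * b[i - j];

    return sum;
}

int main(void)
{
    double a[4] = {1, 2, 3, 4}, b[3] = {1, 1, 1};
    printf("c[2] = %f\n", convolve_one(a, 4, b, 3, 2));  /* 1 + 2 + 3 = 6 */
    return 0;
}
```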
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/6/Gauss_Elimination/main.c
#pragma omp parallel for shared (A, b, mat_size, A_aug)
100
ouble A_aug[3][4] = {0,0,0,0,0,0,0,0,0,0,0,0}; double x[3]; // Define Augmented matrix <LOOP-START>for (int i = 0; i <mat_size;i++) { for(int j = 0; j <mat_size;j++) { A_aug[i][j] = A[i][j]; } A_aug[i][mat_size] = b[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for shared (A, b, mat_size, A_aug)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/6/Gauss_Elimination/main.c
#pragma omp parallel for shared (A_aug, mat_size)
100
// Developing the augmented matrix for (int col = 0; col < mat_size; col++) { <LOOP-START>for (int row = col+1; row < mat_size; row++) { double alp = A_aug[row][col] / A_aug[col][col]; for (int k = 0; k < mat_size+1; k++) { A_aug[row][k] = A_aug[row][k] - alp*A_aug[col][k]; } }<LOOP-END> <OMP-START>#pragma omp parallel for shared (A_aug, mat_size)<OMP-END>
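The Gauss elimination row parallelizes the row updates below the current pivot: for a fixed pivot column they touch disjoint rows, so that loop is safe to share out. A compact sketch with a hypothetical 3x3 system; the explicit shared clause only documents the default data sharing.

```c
#include <stdio.h>

#define N 3

int main(void)
{
    /* Augmented matrix [A | b] for a small system (hypothetical values). */
    double A_aug[N][N + 1] = {
        { 2, 1, 1,  5 },
        { 4, 3, 3, 11 },
        { 8, 7, 9, 24 },
    };
    int mat_size = N;

    /* Forward elimination: the pivot column loop stays sequential, but the
     * updates of the rows below the pivot are independent of each other. */
    for (int col = 0; col < mat_size; col++) {
        #pragma omp parallel for shared(A_aug, mat_size)
        for (int row = col + 1; row < mat_size; row++) {
            double alp = A_aug[row][col] / A_aug[col][col];
            for (int k = 0; k < mat_size + 1; k++)
                A_aug[row][k] -= alp * A_aug[col][k];
        }
    }

    printf("A_aug[2][3] after elimination: %f\n", A_aug[2][3]);
    return 0;
}
```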
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/6/Gauss_Elimination/main.c
#pragma omp parallel for shared(A_aug, x, mat_size) reduction(+:sum)
100
olution vector for (int row = mat_size-2; row>= 0; row--) { double sum = 0; <LOOP-START>for (int col = row; col < mat_size; col++) { sum = sum + A_aug[row][col]*x[col]; }<LOOP-END> <OMP-START>#pragma omp parallel for shared(A_aug, x, mat_size) reduction(+:sum)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/8/TDMA/main.c
#pragma omp parallel for firstprivate(A,b,mat_size) shared (x)
100
b[i] = b[i] - m*b[i-1]; } x[mat_size-1] = b[mat_size-1]/A[mat_size-1][mat_size-1]; <LOOP-START>for (int i = mat_size-2; i >= 0; i--) { x[i] = (b[i] - A[i][i+1]*x[i+1]) / A[i][i]; }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(A,b,mat_size) shared (x)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/9/Bisection_Method/main.c
#pragma omp parallel for reduction(+:count) lastprivate(c) firstprivate (a,b, n_iters, tol)
100
; double tol = 0.01; // Tolerance limit of the approximate solution to the exact solution <LOOP-START>for (int i = 0; i < n_iters; i++) { if (func_x(a) * func_x(b) < 0) // Condition for a and b to be on the opposite side of the root { c = (a+b)/2.0; // Midpoint if (func_x(c) * func_x(a) < 0) { b = c; // b and c are on the same side of the root count = count+1; // increment counter } else { a = c; // a and c are on the same side of the root count = count + 1; // increment counter } } if (func_x(c) == 0 || (b-a)/2.0 < tol) // Conditions for accepting the solution { #pragma omp cancel for // acts like a break command, but increases number of iterations significantly } }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:count) lastprivate(c) firstprivate (a,b, n_iters, tol)<OMP-END>
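The bisection row uses `#pragma omp cancel for` as an early exit once the tolerance is met; note that with a and b firstprivate, each thread simply repeats its own independent bisection, since the algorithm is inherently sequential. The cancellation mechanism itself is sketched below on a simpler parallel array search (sizes and target are arbitrary). Cancellation only takes effect when OMP_CANCELLATION=true, and other threads observe it at cancellation points such as the explicit one in the loop.

```c
#include <stdio.h>

#define N 100000

static int data[N];

int main(void)
{
    int target = 77777, found_at = -1;

    for (int i = 0; i < N; i++)
        data[i] = i;

    /* `#pragma omp cancel for` requests that the remaining iterations of the
     * enclosing worksharing loop be abandoned; without OMP_CANCELLATION=true
     * the loop simply runs to completion. */
    #pragma omp parallel for
    for (int i = 0; i < N; i++) {
        if (data[i] == target) {
            #pragma omp atomic write
            found_at = i;
            #pragma omp cancel for
        }
        #pragma omp cancellation point for
    }

    printf("target found at index %d\n", found_at);
    return 0;
}
```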
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/1/MacLaurin/main.c
#pragma omp parallel for reduction (+:sum)
100
num_threads; // Parallel implementation double start_parallel_time = omp_get_wtime(); <LOOP-START>for (int i = 0; i < n_iters; i++) { num_threads = omp_get_num_threads(); sum = sum + pow(a,i)/factorial(i); }<LOOP-END> <OMP-START>#pragma omp parallel for reduction (+:sum)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/4/Differentiation_Single_Variable/main.c
#pragma omp parallel for firstprivate(h) shared(fwd_x, bwd_x, cd_x)
100
erence, Backward difference and Central difference double par_start_time = omp_get_wtime(); <LOOP-START>for (int i = 0; i < num_points; i++) { num_threads = omp_get_num_threads(); double a = i + 2; fwd_x[i] = (func_x(a+h) - func_x(a))/h; bwd_x[i] = (func_x(a) - func_x(a-h))/h; cd_x[i] = (func_x(a+h) - func_x(a-h))/(2*h); }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(h) shared(fwd_x, bwd_x, cd_x)<OMP-END>
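The finite-difference row gives each thread its own copy of the read-only step h via firstprivate and writes three shared result arrays at disjoint indices. A sketch with a hypothetical f(x) = sin(x) and a small point count (link with -lm):

```c
#include <stdio.h>
#include <math.h>

#define NUM_POINTS 8

static double func_x(double x) { return sin(x); }   /* hypothetical f(x) */

int main(void)
{
    double h = 0.01;
    double fwd_x[NUM_POINTS], bwd_x[NUM_POINTS], cd_x[NUM_POINTS];

    /* h is read-only, so firstprivate gives each thread its own copy; the
     * three result arrays are shared and written at disjoint indices. */
    #pragma omp parallel for firstprivate(h) shared(fwd_x, bwd_x, cd_x)
    for (int i = 0; i < NUM_POINTS; i++) {
        double a = i + 2;
        fwd_x[i] = (func_x(a + h) - func_x(a)) / h;          /* forward difference  */
        bwd_x[i] = (func_x(a) - func_x(a - h)) / h;          /* backward difference */
        cd_x[i]  = (func_x(a + h) - func_x(a - h)) / (2*h);  /* central difference  */
    }

    printf("cd_x[0] (approx. cos(2)) = %f\n", cd_x[0]);
    return 0;
}
```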
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/7/Gauss_Siedel/main.c
#pragma omp parallel for reduction (+:sum) firstprivate (row, A)
100
for (int row = 0; row < mat_size; row++) { double sum = 0; <LOOP-START>for (int col = 0; col < row-1; col++) { num_threads = omp_get_num_threads(); if (col != row) { sum = sum + A[row][col]*x[col]; } }<LOOP-END> <OMP-START>#pragma omp parallel for reduction (+:sum) firstprivate (row, A)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/10/Newton_Raphson_single_variable/main.c
#pragma omp parallel for reduction(+:counter) lastprivate(x_new) firstprivate(x, n_iters, tol)
100
100; // Number of iterations int counter = 0; double tol = 0.001; // Tolerance limit <LOOP-START>for (int i = 0; i < n_iters; i++) { x_new = x - (func_x(x) / diff_x(x)); if (fabs(x_new - x) < tol) { #pragma omp cancel for // Stop iteration when tolerance reached - number of iterations grow significantly } x = x_new; // Substitute old value with newer one for next iteration counter = counter+1; // Number of iterations counter }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:counter) lastprivate(x_new) firstprivate(x, n_iters, tol)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/2/Taylor_Series/main.c
#pragma omp parallel for reduction(+:sum)
100
hread_count; // Parallel implementation double start_parallel_time = omp_get_wtime(); <LOOP-START>for (int i = 0; i < 3; i++) { thread_count = omp_get_num_threads(); sum = sum + ((pow(h,i))/factorial(i))*exp(a); }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:sum)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/5/Numerical_Integration/main.c
#pragma omp parallel for reduction(+:integral_mult) firstprivate(num_points, h)
100
double integral_mult = 0; int thread_num; double par_start_time = omp_get_wtime(); <LOOP-START>for (int i = 0; i < num_points-1; i++) { thread_num = omp_get_num_threads(); integral_mult = integral_mult + ( (h/2.0) * (func_x(a + i*h) + func_x(a+(i+1)*h)) ); }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:integral_mult) firstprivate(num_points, h)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/13/Linear_Regression/main.c
#pragma omp parallel for reduction(+:x_sum, y_sum) firstprivate(x,y)
100
len_arr = sizeof(x)/sizeof(x[0]); double x_sum = 0; double y_sum = 0; double a, b; <LOOP-START>for (int i = 0; i < len_arr; i++) { x_sum = x_sum + x[i]; y_sum = y_sum + y[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x_sum, y_sum) firstprivate(x,y)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Programming-for-Numerical-Computation/13/Linear_Regression/main.c
#pragma omp parallel for reduction(+:temp1, temp2) firstprivate(x_avg, y_avg, x, y)
100
= x_sum/len_arr; double y_avg = y_sum/len_arr; double temp1 = 0; double temp2 = 0; <LOOP-START>for (int i = 0; i < len_arr; i++) { temp1 = temp1 + (x[i] - x_avg)*(y[i] - y_avg); temp2 = temp2 + (x[i] - x_avg)*(x[i] - x_avg); }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:temp1, temp2) firstprivate(x_avg, y_avg, x, y)<OMP-END>
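The two linear-regression rows show that one reduction clause may list several variables, each getting its own per-thread partial sum. A self-contained sketch with made-up data (y = 2x), finishing with the usual slope/intercept formulas:

```c
#include <stdio.h>

int main(void)
{
    double x[] = {1, 2, 3, 4, 5};
    double y[] = {2, 4, 6, 8, 10};            /* hypothetical data: y = 2x */
    int len_arr = sizeof(x) / sizeof(x[0]);
    double x_sum = 0, y_sum = 0;

    /* Two reduction variables, each accumulated independently per thread. */
    #pragma omp parallel for reduction(+:x_sum, y_sum)
    for (int i = 0; i < len_arr; i++) {
        x_sum += x[i];
        y_sum += y[i];
    }

    double x_avg = x_sum / len_arr, y_avg = y_sum / len_arr;
    double temp1 = 0, temp2 = 0;

    #pragma omp parallel for reduction(+:temp1, temp2)
    for (int i = 0; i < len_arr; i++) {
        temp1 += (x[i] - x_avg) * (y[i] - y_avg);
        temp2 += (x[i] - x_avg) * (x[i] - x_avg);
    }

    double b = temp1 / temp2;                 /* slope     */
    double a = y_avg - b * x_avg;             /* intercept */
    printf("fit: y = %f + %f * x\n", a, b);
    return 0;
}
```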
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D NonLinear Convection/2D_NonLinear_Convection/main.c
#pragma omp parallel for
100
oints], u_new[y_points][x_points]; double v[y_points][x_points], v_new[y_points][x_points]; <LOOP-START>for(int i = 0; i < y_points; i++){ for(int j = 0; j < x_points; j++){ u[i][j] = 1.0; v[i][j] = 1.0; u_new[i][j] = 1.0; v_new[i][j] = 1.0; if(x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0){ u[i][j] = 2.0; v[i][j] = 2.0; u_new[i][j] = 2.0; v_new[i][j] = 2.0; } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
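The 2-D initialization rows parallelize the outer row loop of a nested sweep that sets a hat-function initial condition. A sketch with hypothetical grid sizes; a collapse(2) clause could merge both loops into one iteration space if more parallelism were wanted.

```c
#include <stdio.h>

#define X_POINTS 41
#define Y_POINTS 41

int main(void)
{
    double x[X_POINTS], y[Y_POINTS];
    static double u[Y_POINTS][X_POINTS], v[Y_POINTS][X_POINTS];

    for (int j = 0; j < X_POINTS; j++) x[j] = j * (2.0 / (X_POINTS - 1));
    for (int i = 0; i < Y_POINTS; i++) y[i] = i * (2.0 / (Y_POINTS - 1));

    /* Parallelize the outer row loop, as in the rows above; each thread
     * writes a disjoint block of rows of u and v. */
    #pragma omp parallel for
    for (int i = 0; i < Y_POINTS; i++) {
        for (int j = 0; j < X_POINTS; j++) {
            u[i][j] = 1.0;
            v[i][j] = 1.0;
            if (x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0) {
                u[i][j] = 2.0;   /* hat-function initial condition */
                v[i][j] = 2.0;
            }
        }
    }

    printf("u inside the hat region: %f\n", u[15][15]);  /* x = y = 0.75 */
    return 0;
}
```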
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Burgers Equation/2-D_Burgers_Equation/main.c
#pragma omp parallel for
100
oints], u_new[y_points][x_points]; double v[y_points][x_points], v_new[y_points][x_points]; <LOOP-START>for(int i = 0; i < y_points; i++){ for(int j = 0; j < x_points; j++){ u[i][j] = 1.0; v[i][j] = 1.0; u_new[i][j] = 1.0; v_new[i][j] = 1.0; if(x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0){ u[i][j] = 2.0; v[i][j] = 2.0; u_new[i][j] = 2.0; v_new[i][j] = 2.0; } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Channel Flow/2-D_Channel_Flow/main.c
#pragma omp parallel for
100
s]; double u_new[y_points][x_points], v_new[y_points][x_points], p_new[y_points][x_points]; <LOOP-START>for(int i = 0; i < y_points; i++){ for(int j = 0; j < x_points; j++){ u[i][j] = 0.0; v[i][j] = 0.0; p[i][j] = 0.0; u_new[i][j] = 0.0; v_new[i][j] = 0.0; p_new[i][j] = 0.0; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D NonLinear Convection/main.c
#pragma omp parallel for firstprivate(del_x)
100
consider double del_x = x_len/(x_points-1); // Length of an element double x[x_points]; <LOOP-START>for (int i = 0; i < x_points; i++){ x[i] = i * del_x; // x co-ordinates }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(del_x)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D NonLinear Convection/main.c
#pragma omp parallel for shared(x)
100
// Velocity at current time double u_new[x_points]; // Velocity at next time interval <LOOP-START>for (int i = 0; i < x_points; i++){ if (x[i] > 0.5 && x[i] < 1.0){ u[i] = 2.0; u_new[i] = 2.0; } else{ u[i] = 1.0; u_new[i] = 1.0; } }<LOOP-END> <OMP-START>#pragma omp parallel for shared(x)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Diffusion/2-D_Diffusion/main.c
#pragma omp parallel for
100
oints], u_new[y_points][x_points]; double v[y_points][x_points], v_new[y_points][x_points]; <LOOP-START>for(int i = 0; i < y_points; i++){ for(int j = 0; j < x_points; j++){ u[i][j] = 1.0; v[i][j] = 1.0; u_new[i][j] = 1.0; v_new[i][j] = 1.0; if(x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0){ u[i][j] = 2.0; v[i][j] = 2.0; u_new[i][j] = 2.0; v_new[i][j] = 2.0; } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D Linear Convection/main.c
#pragma omp parallel for
100
to consider float del_x = x_len/(x_points-1); // Length of an element float x[x_points]; <LOOP-START>for (int i = 0; i < x_points; i++){ x[i] = i * del_x; // x co-ordinates }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Poissons Equation/2-D_Poissons_Equation/main.c
#pragma omp parallel for
100
points], p_new[y_points][x_points]; double b[y_points][x_points]; // source term <LOOP-START>for(int i = 0; i < y_points; i++){ for(int j = 0; j < x_points; j++){ p[i][j] = 0.0; p_new[i][j] = 0.0; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Poissons Equation/2-D_Poissons_Equation/main.c
#pragma omp parallel for
100
j] = 0.0; } } // Initialize source term - add spikes at 1/4th and 3/4th length <LOOP-START>for(int i = 0; i < y_points; i++){ for(int j = 0; j < x_points; j++){ b[i][j] = 0.0; if(i == abs(0.25*x_points) && j == abs(0.25*y_points)){ b[i][j] = 100; } if(i == abs(0.75*x_points) && j == abs(0.75*y_points)){ b[i][j] = -100; } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D Diffusion/1-D_Diffusion/main.c
#pragma omp parallel for
100
// CFL criteria double x[x_points]; double u[x_points]; double u_new[x_points]; <LOOP-START>for (int i = 0; i < x_points; i++){ x[i] = i * del_x; // Co-ordinates of the grid points if (x[i] > 0.5 && x[i] < 1.0){ // Applying I.C.s for velocity values u[i] = 2.0; u_new[i] = 2.0; } else{ u[i] = 1.0; u_new[i] = 1.0; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Laplace Equation/2-D_Laplace_Equation/main.c
#pragma omp parallel for
100
ints]; double l1norm = 1.0; double l1norm_limit = 0.0001; double sum_num, sum_den; <LOOP-START>for(int i = 0; i < y_points; i++){ for(int j = 0; j < x_points; j++){ p[i][j] = 0.0; p_new[i][j] = 0.0; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Linear Convection/2-D_Linear_Convection/main.c
#pragma omp parallel for
100
} // } double u[y_points][x_points]; double u_new[y_points][x_points]; <LOOP-START>for(int i = 0; i < y_points; i++){ for(int j = 0; j < x_points; j++){ u[i][j] = 1.0; u_new[i][j] = 1.0; if(x[i] > 0.5 && x[i] < 1.0 && y[i] > 0.5 && y[i] < 1.0){ u[i][j] = 2.0; u_new[i][j] = 2.0; } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/2-D Cavity Flow/2-D_Cavity_Flow/main.c
#pragma omp parallel for
100
s]; double u_new[y_points][x_points], v_new[y_points][x_points], p_new[y_points][x_points]; <LOOP-START>for(int i = 0; i < y_points; i++){ for(int j = 0; j < x_points; j++){ u[i][j] = 0.0; v[i][j] = 0.0; p[i][j] = 0.0; u_new[i][j] = 0.0; v_new[i][j] = 0.0; p_new[i][j] = 0.0; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/Dynamic_Memory_Allocation/2-D_NonLinear_Convection/2-D_Nonlinear_Convection_Dynamic/main.c
#pragma omp parallel for
100
oints*x_points*sizeof(double)); v_new = (double *)malloc(y_points*x_points*sizeof(double)); <LOOP-START>for(int i = 0; i < y_points; i++){ for(int j = 0; j < x_points; j++){ *(u+i*x_points+j) = 1.0; *(v+i*x_points+j) = 1.0; *(u_new+i*x_points+j) = 1.0; *(v_new+i*x_points+j) = 1.0; if(*(x+j) > 0.5 && *(x+j) < 1.0 && *(y+i) > 0.5 && *(y+i) < 1.0){ *(u+i*x_points+j) = 2.0; *(v+i*x_points+j) = 2.0; *(u_new+i*x_points+j) = 2.0; *(v_new+i*x_points+j) = 2.0; } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/Dynamic_Memory_Allocation/1-D_Linear_Convection/1-D_Linear_Convection_Dynamic/main.c
#pragma omp parallel for
100
end_time - ser_start_time); // Parallel execution // Defining the initial conditions <LOOP-START>for(int i = 0; i < x_points; i++){ *(u+i) = 1.0; *(u_new+i) = 1.0; if(*(x+i) > 0.5 && *(x+i) < 1.0){ *(u+i) = 2.0; *(u_new+i) = 2.0; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D Burgers Equation/1D_Burgers_Equation/main.c
#pragma omp parallel for
100
* pi; int x_points = 1001; double del_x = x_len/(x_points-1); double x[x_points]; <LOOP-START>for (int i = 0; i < x_points; i++){ x[i] = i * del_x; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/OpenMP_C_12_steps_to_Navier_Stokes/1-D Burgers Equation/1D_Burgers_Equation/main.c
#pragma omp parallel for
100
double del_t = nu * del_x; double u[x_points], u_new[x_points]; // Initial value of u <LOOP-START>for (int i = 0; i < x_points; i++){ u[i] = - 2.0 * nu * ( - (2.0 * x[i]) * exp( - (x[i] * x[i]) / (4.0 * nu)) / (4.0 * nu) - (2.0 * x[i] - 4.0 * pi) * exp( - (x[i] - 2.0 * pi) * (x[i] - 2.0 * pi) / (4.0 * nu)) / (4.0 * nu)) / (exp( - (x[i] - 2.0 * pi) * (x[i] - 2.0 * pi) / (4.0 * nu)) + exp( - (x[i] * x[i]) / (4.0 * nu))) + 4.0; u_new[i] = u[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Introduction_To_OpenMP/6/Fibonacci/main.c
#pragma omp parallel for private(val)
100
val = fibonnaci(n-1) + fibonnaci(n-2); return val; } } int main() { int val; <LOOP-START>for (int i = 0; i <= 10; i++) { val = fibonnaci(i); printf("Fibonacci of %d th term is: %d\n", i, val); }<LOOP-END> <OMP-START>#pragma omp parallel for private(val)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Introduction_To_OpenMP/4/Loop/main.c
#pragma omp parallel for reduction(+:sum)
100
ble len_ele = 1.0/(num_ele-1); double sum = 0.0; double start_time = omp_get_wtick(); <LOOP-START>for (int i = 0; i < num_ele; i++) { double x = i*len_ele; sum = sum + 4.0/(1+(x*x)); }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:sum)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/soumyasen1809/Introduction_To_OpenMP/5/Debug_Mandelbrot/main.c
#pragma omp parallel for default(shared) shared(c) firstprivate(eps)
100
ntains the Mandelbrot set, // testing each point to see whether it is inside or outside the set. <LOOP-START>for (int i=0; i<NPOINTS; i++) { for (int j=0; j<NPOINTS; j++) { c.r = -2.0+2.5*(double)(i)/(double)(NPOINTS)+eps; c.i = 1.125*(double)(j)/(double)(NPOINTS)+eps; testpoint(c); } }<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) shared(c) firstprivate(eps)<OMP-END>
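The Debug_Mandelbrot row shares the struct c across threads while every iteration rewrites it, which is a data race (the directory name suggests this is the exercise's buggy version). A sketch of a presumably intended fix: declare c inside the loop body so it is private, and update the shared counter atomically. NPOINTS, MAXITER, and the point count reported are hypothetical choices.

```c
#include <stdio.h>

#define NPOINTS 500
#define MAXITER 200

struct d_complex { double r, i; };

static int numoutside = 0;

/* Escape test for one point of the Mandelbrot iteration. */
static void testpoint(struct d_complex c)
{
    struct d_complex z = c;
    for (int iter = 0; iter < MAXITER; iter++) {
        double temp = z.r * z.r - z.i * z.i + c.r;
        z.i = 2.0 * z.r * z.i + c.i;
        z.r = temp;
        if (z.r * z.r + z.i * z.i > 4.0) {
            #pragma omp atomic
            numoutside++;
            break;
        }
    }
}

int main(void)
{
    double eps = 1.0e-5;

    /* Declaring c inside the loop body makes it private to each iteration,
     * avoiding the race a shared c would cause in the row above. */
    #pragma omp parallel for firstprivate(eps)
    for (int i = 0; i < NPOINTS; i++) {
        for (int j = 0; j < NPOINTS; j++) {
            struct d_complex c;
            c.r = -2.0 + 2.5 * (double)i / (double)NPOINTS + eps;
            c.i = 1.125 * (double)j / (double)NPOINTS + eps;
            testpoint(c);
        }
    }

    printf("points outside the set: %d\n", numoutside);
    return 0;
}
```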
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c
#pragma omp parallel for
100
tializeGrid( LBM_Grid grid ) { SWEEP_VAR /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP <LOOP-START>SWEEP_START( 0, 0, -2, 0, 0, SIZE_Z+2 ) LOCAL( grid, C ) = DFL1; LOCAL( grid, N ) = DFL2; LOCAL( grid, S ) = DFL2; LOCAL( grid, E ) = DFL2; LOCAL( grid, W ) = DFL2; LOCAL( grid, T ) = DFL2; LOCAL( grid, B ) = DFL2; LOCAL( grid, NE ) = DFL3; LOCAL( grid, NW ) = DFL3; LOCAL( grid, SE ) = DFL3; LOCAL( grid, SW ) = DFL3; LOCAL( grid, NT ) = DFL3; LOCAL( grid, NB ) = DFL3; LOCAL( grid, ST ) = DFL3; LOCAL( grid, SB ) = DFL3; LOCAL( grid, ET ) = DFL3; LOCAL( grid, EB ) = DFL3; LOCAL( grid, WT ) = DFL3; LOCAL( grid, WB ) = DFL3; CLEAR_ALL_FLAGS_SWEEP( grid ); SWEEP_END } /*############################################################################*/ void LBM_swapGrids( LBM_GridPtr* grid1, LBM_GridPtr* grid2 ) { LBM_GridPtr aux = *grid1; *grid1 = *grid2; *grid2 = aux; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c
#pragma omp parallel for private( x, y )
100
ForLDC( LBM_Grid grid ) { int x, y, z; /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP <LOOP-START>for( z = -2; z < SIZE_Z+2; z++ ) { for( y = 0; y < SIZE_Y; y++ ) { for( x = 0; x < SIZE_X; x++ ) { if( x == 0 || x == SIZE_X-1 || y == 0 || y == SIZE_Y-1 || z == 0 || z == SIZE_Z-1 ) { SET_FLAG( grid, x, y, z, OBSTACLE ); } else { if( (z == 1 || z == SIZE_Z-2) && x > 1 && x < SIZE_X-2 && y > 1 && y < SIZE_Y-2 ) { SET_FLAG( grid, x, y, z, ACCEL ); } } } } }<LOOP-END> <OMP-START>#pragma omp parallel for private( x, y )<OMP-END>
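The LBM rows work-share only the outer z loop and list the inner indices in private(x, y), since otherwise every thread would write the same shared loop counters. A reduced sketch that just marks boundary cells; the grid sizes and flag array are hypothetical stand-ins for the LBM macros.

```c
#include <stdio.h>

#define SIZE_X 8
#define SIZE_Y 8
#define SIZE_Z 8

static int flags[SIZE_Z][SIZE_Y][SIZE_X];

int main(void)
{
    int x, y, z;

    /* Only the z loop is work-shared; x and y are written by every thread,
     * so they must be listed private (or declared inside the loop body). */
    #pragma omp parallel for private(x, y)
    for (z = 0; z < SIZE_Z; z++)
        for (y = 0; y < SIZE_Y; y++)
            for (x = 0; x < SIZE_X; x++)
                if (x == 0 || x == SIZE_X - 1 || y == 0 || y == SIZE_Y - 1)
                    flags[z][y][x] = 1;   /* mark boundary cells, like OBSTACLE */

    printf("flags[0][0][0] = %d\n", flags[0][0][0]);
    return 0;
}
```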
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c
#pragma omp parallel for private( x, y )
100
hannel( LBM_Grid grid ) { int x, y, z; /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP <LOOP-START>for( z = -2; z < SIZE_Z+2; z++ ) { for( y = 0; y < SIZE_Y; y++ ) { for( x = 0; x < SIZE_X; x++ ) { if( x == 0 || x == SIZE_X-1 || y == 0 || y == SIZE_Y-1 ) { SET_FLAG( grid, x, y, z, OBSTACLE ); if( (z == 0 || z == SIZE_Z-1) && ! TEST_FLAG( grid, x, y, z, OBSTACLE )) SET_FLAG( grid, x, y, z, IN_OUT_FLOW ); } } } }<LOOP-END> <OMP-START>#pragma omp parallel for private( x, y )<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c
#pragma omp parallel for private( ux, uy, uz, u2, rho )
100
SWEEP_VAR double ux, uy, uz, u2, rho; /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP <LOOP-START>SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z ) if( TEST_FLAG_SWEEP( srcGrid, OBSTACLE )) { DST_C ( dstGrid ) = SRC_C ( srcGrid ); DST_S ( dstGrid ) = SRC_N ( srcGrid ); DST_N ( dstGrid ) = SRC_S ( srcGrid ); DST_W ( dstGrid ) = SRC_E ( srcGrid ); DST_E ( dstGrid ) = SRC_W ( srcGrid ); DST_B ( dstGrid ) = SRC_T ( srcGrid ); DST_T ( dstGrid ) = SRC_B ( srcGrid ); DST_SW( dstGrid ) = SRC_NE( srcGrid ); DST_SE( dstGrid ) = SRC_NW( srcGrid ); DST_NW( dstGrid ) = SRC_SE( srcGrid ); DST_NE( dstGrid ) = SRC_SW( srcGrid ); DST_SB( dstGrid ) = SRC_NT( srcGrid ); DST_ST( dstGrid ) = SRC_NB( srcGrid ); DST_NB( dstGrid ) = SRC_ST( srcGrid ); DST_NT( dstGrid ) = SRC_SB( srcGrid ); DST_WB( dstGrid ) = SRC_ET( srcGrid ); DST_WT( dstGrid ) = SRC_EB( srcGrid ); DST_EB( dstGrid ) = SRC_WT( srcGrid ); DST_ET( dstGrid ) = SRC_WB( srcGrid ); continue; }<LOOP-END> <OMP-START>#pragma omp parallel for private( ux, uy, uz, u2, rho )<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c
#pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \
100
u2, px, py; SWEEP_VAR /* inflow */ /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP <LOOP-START>ux2, uy2, uz2, rho2, u2, px, py ) SWEEP_START( 0, 0, 0, 0, 0, 1 ) rho1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, N ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, E ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, T ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, ST ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, WT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, WB ); rho2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, N ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, E ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, T ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, ST ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, WT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, WB ); rho = 2.0*rho1 - rho2; px = (SWEEP_X / (0.5*(SIZE_X-1))) - 1.0; py = (SWEEP_Y / (0.5*(SIZE_Y-1))) - 1.0; ux = 0.00; uy = 0.00; uz = 0.01 * (1.0-px*px) * (1.0-py*py); u2 = 1.5 * (ux*ux + uy*uy + uz*uz); LOCAL( srcGrid, C ) = DFL1*rho*(1.0 - u2); LOCAL( srcGrid, N ) = DFL2*rho*(1.0 + uy*(4.5*uy + 3.0) - u2); LOCAL( srcGrid, S ) = DFL2*rho*(1.0 + uy*(4.5*uy - 3.0) - u2); LOCAL( srcGrid, E ) = DFL2*rho*(1.0 + ux*(4.5*ux + 3.0) - u2); LOCAL( srcGrid, W ) = DFL2*rho*(1.0 + ux*(4.5*ux - 3.0) - u2); LOCAL( srcGrid, T ) = DFL2*rho*(1.0 + uz*(4.5*uz + 3.0) - u2); LOCAL( srcGrid, B ) = DFL2*rho*(1.0 + uz*(4.5*uz - 3.0) - u2); LOCAL( srcGrid, NE) = DFL3*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2); LOCAL( srcGrid, NW) = DFL3*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2); LOCAL( srcGrid, SE) = DFL3*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2); LOCAL( srcGrid, SW) = DFL3*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2); LOCAL( srcGrid, NT) = DFL3*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2); LOCAL( srcGrid, NB) = DFL3*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2); LOCAL( srcGrid, ST) = DFL3*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2); LOCAL( srcGrid, SB) = DFL3*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2); LOCAL( srcGrid, ET) = DFL3*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2); LOCAL( srcGrid, EB) = DFL3*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2); LOCAL( srcGrid, WT) = DFL3*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2); LOCAL( srcGrid, WB) = DFL3*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2); SWEEP_END /* outflow */ /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP #pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \ ux2, uy2, uz2, rho2, u2, px, py ) SWEEP_START( 0, 0, SIZE_Z-1, 0, 0, 
SIZE_Z ) rho1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB ); ux1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB ); uy1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ); uz1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB ); ux1 /= rho1; uy1 /= rho1; uz1 /= rho1; rho2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB ); ux2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB ); uy2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N ) - 
GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ); uz2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB ); ux2 /= rho2; uy2 /= rho2; uz2 /= rho2; rho = 1.0; ux = 2*ux1 - ux2; uy = 2*uy1 - uy2; uz = 2*uz1 - uz2; u2 = 1.5 * (ux*ux + uy*uy + uz*uz); LOCAL( srcGrid, C ) = DFL1*rho*(1.0 - u2); LOCAL( srcGrid, N ) = DFL2*rho*(1.0 + uy*(4.5*uy + 3.0) - u2); LOCAL( srcGrid, S ) = DFL2*rho*(1.0 + uy*(4.5*uy - 3.0) - u2); LOCAL( srcGrid, E ) = DFL2*rho*(1.0 + ux*(4.5*ux + 3.0) - u2); LOCAL( srcGrid, W ) = DFL2*rho*(1.0 + ux*(4.5*ux - 3.0) - u2); LOCAL( srcGrid, T ) = DFL2*rho*(1.0 + uz*(4.5*uz + 3.0) - u2); LOCAL( srcGrid, B ) = DFL2*rho*(1.0 + uz*(4.5*uz - 3.0) - u2); LOCAL( srcGrid, NE) = DFL3*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2); LOCAL( srcGrid, NW) = DFL3*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2); LOCAL( srcGrid, SE) = DFL3*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2); LOCAL( srcGrid, SW) = DFL3*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2); LOCAL( srcGrid, NT) = DFL3*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2); LOCAL( srcGrid, NB) = DFL3*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2); LOCAL( srcGrid, ST) = DFL3*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2); LOCAL( srcGrid, SB) = DFL3*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2); LOCAL( srcGrid, ET) = DFL3*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2); LOCAL( srcGrid, EB) = DFL3*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2); LOCAL( srcGrid, WT) = DFL3*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2); LOCAL( srcGrid, WB) = DFL3*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2); SWEEP_END } /*############################################################################*/ void LBM_showGridStatistics( LBM_Grid grid ) { int nObstacleCells = 0, nAccelCells = 0, nFluidCells = 0; double ux, uy, uz; double minU2 = 1e+30, maxU2 = -1e+30, u2; double minRho = 1e+30, maxRho = -1e+30, rho; double mass = 0; SWEEP_VAR SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z ) rho = + LOCAL( grid, C ) + LOCAL( grid, N ) + LOCAL( grid, S ) + LOCAL( grid, E ) + LOCAL( grid, W ) + LOCAL( grid, T ) + LOCAL( grid, B ) + LOCAL( grid, NE ) + LOCAL( grid, NW ) + LOCAL( grid, SE ) + LOCAL( grid, SW ) + LOCAL( grid, NT ) + LOCAL( grid, NB ) + LOCAL( grid, ST ) + LOCAL( grid, SB ) + LOCAL( grid, ET ) + LOCAL( grid, EB ) + LOCAL( grid, WT ) + LOCAL( grid, WB ); if( rho < minRho ) minRho = rho; if( rho > maxRho ) maxRho = rho; mass += rho; if( TEST_FLAG_SWEEP( grid, OBSTACLE )) { nObstacleCells++; } else { if( TEST_FLAG_SWEEP( grid, ACCEL )) nAccelCells++; else nFluidCells++; ux = + LOCAL( grid, E ) - LOCAL( grid, W ) + LOCAL( grid, NE ) - LOCAL( grid, NW ) + LOCAL( grid, SE ) - LOCAL( grid, SW ) + LOCAL( grid, ET ) + LOCAL( grid, EB ) - LOCAL( grid, WT ) - LOCAL( grid, WB ); uy = + LOCAL( grid, N ) - LOCAL( grid, S ) + LOCAL( grid, NE ) + LOCAL( grid, NW ) - 
LOCAL( grid, SE ) - LOCAL( grid, SW ) + LOCAL( grid, NT ) + LOCAL( grid, NB ) - LOCAL( grid, ST ) - LOCAL( grid, SB ); uz = + LOCAL( grid, T ) - LOCAL( grid, B ) + LOCAL( grid, NT ) - LOCAL( grid, NB ) + LOCAL( grid, ST ) - LOCAL( grid, SB ) + LOCAL( grid, ET ) - LOCAL( grid, EB ) + LOCAL( grid, WT ) - LOCAL( grid, WB ); u2 = (ux*ux + uy*uy + uz*uz) / (rho*rho); if( u2 < minU2 ) minU2 = u2; if( u2 > maxU2 ) maxU2 = u2; } SWEEP_END printf( "LBM_showGridStatistics:\n" "\tnObstacleCells: %7i nAccelCells: %7i nFluidCells: %7i\n" "\tminRho: %8.4f maxRho: %8.4f mass: %e\n" "\tminU: %e maxU: %e\n\n", nObstacleCells, nAccelCells, nFluidCells, minRho, maxRho, mass, sqrt( minU2 ), sqrt( maxU2 ) ); }<LOOP-END> <OMP-START>#pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_11/lbm.c
#pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \
100
) + 3.0) - u2); SWEEP_END /* outflow */ /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP <LOOP-START>ux2, uy2, uz2, rho2, u2, px, py ) SWEEP_START( 0, 0, SIZE_Z-1, 0, 0, SIZE_Z ) rho1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB ); ux1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB ); uy1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ); uz1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB ); ux1 /= rho1; uy1 /= rho1; uz1 /= rho1; rho2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB ); ux2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) + 
GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB ); uy2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ); uz2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB ); ux2 /= rho2; uy2 /= rho2; uz2 /= rho2; rho = 1.0; ux = 2*ux1 - ux2; uy = 2*uy1 - uy2; uz = 2*uz1 - uz2; u2 = 1.5 * (ux*ux + uy*uy + uz*uz); LOCAL( srcGrid, C ) = DFL1*rho*(1.0 - u2); LOCAL( srcGrid, N ) = DFL2*rho*(1.0 + uy*(4.5*uy + 3.0) - u2); LOCAL( srcGrid, S ) = DFL2*rho*(1.0 + uy*(4.5*uy - 3.0) - u2); LOCAL( srcGrid, E ) = DFL2*rho*(1.0 + ux*(4.5*ux + 3.0) - u2); LOCAL( srcGrid, W ) = DFL2*rho*(1.0 + ux*(4.5*ux - 3.0) - u2); LOCAL( srcGrid, T ) = DFL2*rho*(1.0 + uz*(4.5*uz + 3.0) - u2); LOCAL( srcGrid, B ) = DFL2*rho*(1.0 + uz*(4.5*uz - 3.0) - u2); LOCAL( srcGrid, NE) = DFL3*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2); LOCAL( srcGrid, NW) = DFL3*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2); LOCAL( srcGrid, SE) = DFL3*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2); LOCAL( srcGrid, SW) = DFL3*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2); LOCAL( srcGrid, NT) = DFL3*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2); LOCAL( srcGrid, NB) = DFL3*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2); LOCAL( srcGrid, ST) = DFL3*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2); LOCAL( srcGrid, SB) = DFL3*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2); LOCAL( srcGrid, ET) = DFL3*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2); LOCAL( srcGrid, EB) = DFL3*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2); LOCAL( srcGrid, WT) = DFL3*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2); LOCAL( srcGrid, WB) = DFL3*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2); SWEEP_END } /*############################################################################*/ void LBM_showGridStatistics( LBM_Grid grid ) { int nObstacleCells = 0, nAccelCells = 0, nFluidCells = 0; double ux, uy, uz; double minU2 = 1e+30, maxU2 = -1e+30, u2; double minRho = 1e+30, maxRho = -1e+30, rho; double mass = 0; SWEEP_VAR SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z ) rho = + LOCAL( grid, C ) + LOCAL( grid, N ) + LOCAL( grid, S ) + LOCAL( grid, E ) + LOCAL( grid, W ) + LOCAL( grid, T ) + LOCAL( grid, B ) + LOCAL( grid, NE ) + LOCAL( grid, NW ) + LOCAL( grid, SE ) + LOCAL( grid, SW ) + LOCAL( grid, NT ) + LOCAL( grid, NB ) + LOCAL( grid, ST ) + LOCAL( grid, SB ) + LOCAL( grid, ET ) + LOCAL( grid, EB ) + LOCAL( grid, WT ) + LOCAL( grid, WB ); if( rho < minRho ) minRho = rho; if( rho > maxRho ) maxRho = rho; mass += rho; if( TEST_FLAG_SWEEP( grid, OBSTACLE )) { nObstacleCells++; } else { if( TEST_FLAG_SWEEP( grid, ACCEL )) nAccelCells++; else nFluidCells++; ux = + LOCAL( grid, E ) - LOCAL( grid, W ) + LOCAL( grid, NE ) - LOCAL( grid, NW ) + LOCAL( grid, SE ) - 
LOCAL( grid, SW ) + LOCAL( grid, ET ) + LOCAL( grid, EB ) - LOCAL( grid, WT ) - LOCAL( grid, WB ); uy = + LOCAL( grid, N ) - LOCAL( grid, S ) + LOCAL( grid, NE ) + LOCAL( grid, NW ) - LOCAL( grid, SE ) - LOCAL( grid, SW ) + LOCAL( grid, NT ) + LOCAL( grid, NB ) - LOCAL( grid, ST ) - LOCAL( grid, SB ); uz = + LOCAL( grid, T ) - LOCAL( grid, B ) + LOCAL( grid, NT ) - LOCAL( grid, NB ) + LOCAL( grid, ST ) - LOCAL( grid, SB ) + LOCAL( grid, ET ) - LOCAL( grid, EB ) + LOCAL( grid, WT ) - LOCAL( grid, WB ); u2 = (ux*ux + uy*uy + uz*uz) / (rho*rho); if( u2 < minU2 ) minU2 = u2; if( u2 > maxU2 ) maxU2 = u2; } SWEEP_END printf( "LBM_showGridStatistics:\n" "\tnObstacleCells: %7i nAccelCells: %7i nFluidCells: %7i\n" "\tminRho: %8.4f maxRho: %8.4f mass: %e\n" "\tminU: %e maxU: %e\n\n", nObstacleCells, nAccelCells, nFluidCells, minRho, maxRho, mass, sqrt( minU2 ), sqrt( maxU2 ) ); }<LOOP-END> <OMP-START>#pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \<OMP-END>
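The two lbm.c records above parallelize the inflow and outflow boundary sweeps of the D3Q19 lattice: the SWEEP macros expand to a plain loop over the cells of one z-plane, and every per-cell scalar (ux..uz2, rho..rho2, u2, px, py) is listed private because the same function-scope variables are reused by each iteration. A minimal sketch of the inflow part of that pattern, assuming a flat uz_plane array and plain SIZE_X/SIZE_Y constants instead of the real grid macros:

#define SIZE_X 100
#define SIZE_Y 100

/* Sketch: impose the parabolic inflow profile on one z-plane.
 * px, py, uz are per-cell scratch, so they are listed private. */
void set_inflow_plane(double *uz_plane /* SIZE_X*SIZE_Y entries */)
{
    int i;
    double px, py, uz;

#pragma omp parallel for private(px, py, uz)
    for (i = 0; i < SIZE_X * SIZE_Y; i++) {
        int x = i % SIZE_X;
        int y = i / SIZE_X;
        /* map cell coordinates to [-1, 1], as the record does with SWEEP_X/Y */
        px = (x / (0.5 * (SIZE_X - 1))) - 1.0;
        py = (y / (0.5 * (SIZE_Y - 1))) - 1.0;
        uz = 0.01 * (1.0 - px * px) * (1.0 - py * py);
        uz_plane[i] = uz;   /* each iteration writes a distinct cell: no race */
    }
}

Declaring the scratch variables inside the loop body would make the private clause unnecessary; the original keeps them at function scope, which is why its private list is so long.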
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_Omp_Instrument/test.c
#pragma omp parallel for
100
de <omp.h> #include <stdio.h> void fct1(int k,int l[]) { int i; int p; printf("Start\n"); <LOOP-START>for(i=0;i<4;i++) { printf("LOOPA%d\n",i); p+=i; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_Omp_Instrument/test.c
#pragma omp parallel for
100
("LOOPA%d\n",i); p+=i; } for(i=0;i<4;i++) { printf("LOOPB%d\n",i); p+=i; } <LOOP-START>for(i=0;i<4;i++) { p+=i;printf("LOOPC%d thread__%d\n",i,omp_get_thread_num()); }<LOOP-END> <OMP-START>#pragma omp parallel for <OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_Omp_Capture_Replay/test.c
#pragma omp parallel for
100
de <omp.h> #include <stdio.h> void fct1(int k,int l[]) { int i; int p; printf("Start\n"); <LOOP-START>for(i=0;i<4;i++) { printf("LOOPA%d\n",i); p+=i; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_Omp_Capture_Replay/test.c
#pragma omp parallel for
100
("LOOPA%d\n",i); p+=i; } for(i=0;i<4;i++) { printf("LOOPB%d\n",i); p+=i; } <LOOP-START>for(i=0;i<4;i++) { p+=i;printf("LOOPC%d thread__%d\n",i,omp_get_thread_num()); }<LOOP-END> <OMP-START>#pragma omp parallel for <OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/benchmark-subsetting/cere/tests/test_Omp_Blackscholes/blackscholes.m4.cpp
#pragma omp parallel for private(i, price, priceDelta)
100
int end = start + (numOptions / nThreads); for (j=0; j<NUM_RUNS; j++) { #ifdef ENABLE_OPENMP <LOOP-START>for (i=0; i<numOptions; i++) { #else //ENABLE_OPENMP for (i=start; i<end; i++) { //ENABLE_OPENMP /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-4 ){ printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } } }<LOOP-END> <OMP-START>#pragma omp parallel for private(i, price, priceDelta)<OMP-END>
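The blackscholes record partitions the option array by hand in the non-OpenMP branch but otherwise leaves scheduling to `#pragma omp parallel for private(i, price, priceDelta)`. A reduced sketch of the same pattern, with a placeholder kernel standing in for BlkSchlsEqEuroNoDiv and only spot/strike inputs kept (NUM_OPTIONS and price_one are assumptions, not names from the benchmark):

#include <math.h>

#define NUM_OPTIONS 1000

/* Placeholder kernel standing in for BlkSchlsEqEuroNoDiv:
 * any pure per-option function fits the pattern. */
static float price_one(float spot, float strike)
{
    return fmaxf(spot - strike, 0.0f);
}

void price_all(const float *spot, const float *strike, float *prices)
{
    int i;
    float price;

    /* i and price are per-iteration scratch, hence private; the arrays are
     * shared and each iteration writes only prices[i]. */
#pragma omp parallel for private(i, price)
    for (i = 0; i < NUM_OPTIONS; i++) {
        price = price_one(spot[i], strike[i]);
        prices[i] = price;
    }
}

The ERR_CHK branch in the record is the only reason priceDelta appears in the private list; dropping it keeps the sketch short.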
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/src/imop/lib/testcases/allKnown.c
#pragma omp parallel for
100
gma omp section { testThisNonLeaf: #pragma omp critical { x = x + 6; } } } im51: <LOOP-START>for (iter = 0; iter < 8; iter++) { int x1; int y1; x1 += my(8); foo(x1, y1, 1); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/src/imop/lib/testcases/cfgTests/parallelForConstruct.c
#pragma omp parallel for private(a)
100
int main() { int x = 10; int a; <LOOP-START>for(x = 0; x < 10; x++) { a -= 10; }<LOOP-END> <OMP-START>#pragma omp parallel for private(a)<OMP-END>
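In the parallelForConstruct.c record, a is listed private, so each thread works on its own uninitialized copy and the decrements are discarded when the construct ends; the testcase only exercises CFG construction, so that is intentional. If the decrements were meant to survive the loop, a reduction would be the usual form; a sketch (the initialization and printf are additions for illustration):

#include <stdio.h>

int main(void)
{
    int x;
    int a = 0;

    /* OpenMP's '-' reduction gives each thread a zero-initialized copy
     * and combines the copies by addition when the loop ends. */
#pragma omp parallel for reduction(-:a)
    for (x = 0; x < 10; x++) {
        a -= 10;
    }
    printf("a = %d\n", a);   /* -100 */
    return 0;
}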
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/src/imop/lib/testcases/cfgTests/allCFG.c
#pragma omp parallel for
100
a omp section { testThisNonLeaf:; #pragma omp critical { x = x + 6; } } } im51:; <LOOP-START>for (iter = 0; iter < 8; iter++) { int x1; int y1; x1 += my(8); foo(x1, y1, 1); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
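In the allKnown.c and allCFG.c records, x1 and y1 are block-scope locals of the loop body, so they are already private to each iteration without any clause; they are also read uninitialized, which these parser/CFG testcases do not care about. A sketch of the same shape with the locals initialized and with stand-ins for my() and foo():

#include <stdio.h>

static int my(int n) { return n * 2; }                     /* stand-in */
static void foo(int a, int b, int c) { printf("%d %d %d\n", a, b, c); }

int main(void)
{
    int iter;

#pragma omp parallel for
    for (iter = 0; iter < 8; iter++) {
        /* block-scope locals are private per iteration automatically */
        int x1 = 0;
        int y1 = 0;
        x1 += my(8);
        foo(x1, y1, 1);
    }
    return 0;
}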
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/src/imop/lib/testcases/simplification/test5.c
#pragma omp parallel for ordered default(shared) private(i)
100
int main () { int i; int j = 10; <LOOP-START>for(i = 0; i < j; i++) { #pragma omp atomic update i = i + 1; }<LOOP-END> <OMP-START>#pragma omp parallel for ordered default(shared) private(i)<OMP-END>
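The test5.c record applies `omp atomic update` to the loop variable itself, which a conforming program may not modify inside an OpenMP for loop; the testcase exists to exercise simplification, not as a usable pattern. A sketch of the same directives applied to a separate shared counter (count is an addition for illustration; the ordered clause is kept from the record even though no ordered region is used):

#include <stdio.h>

int main(void)
{
    int i;
    int j = 10;
    int count = 0;                    /* shared counter added for the sketch */

#pragma omp parallel for ordered default(shared) private(i)
    for (i = 0; i < j; i++) {
        /* atomic update protects a read-modify-write on shared data;
         * the loop variable itself must not be modified in the body */
#pragma omp atomic update
        count = count + 1;
    }
    printf("count = %d\n", count);    /* always 10 */
    return 0;
}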
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/enforcer.c
#pragma omp parallel for
100
ns { #pragma omp section x++; #pragma omp section x+=2; } #pragma omp single #pragma omp task <LOOP-START>for (x = 0; x < 12; x++) x = x + 0; #pragma omp parallel sections { #pragma omp section x++; #pragma omp section x+=2; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
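The enforcer.c record is a stress test: the parallel sections update x without synchronization and a parallel for sits under single/task, so it should be read as compiler input rather than as a pattern to copy. A sketch of just the sections part with the updates made race-free via atomic:

#include <stdio.h>

int main(void)
{
    int x = 0;

#pragma omp parallel sections
    {
#pragma omp section
        {
#pragma omp atomic
            x++;
        }
#pragma omp section
        {
#pragma omp atomic
            x += 2;
        }
    }
    printf("x = %d\n", x);   /* always 3 */
    return 0;
}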
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/final-preproc/sp-b.c
#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)
100
} } } static void ninvr(void) { int i, j, k; double r1, r2, r3, r4, r5, t1, t2; <LOOP-START>for (i = 1; i <= grid_points[0]-2; i++) { for (j = 1; j <= grid_points[1]-2; j++) { for (k = 1; k <= grid_points[2]-2; k++) { r1 = rhs[0][i][j][k]; r2 = rhs[1][i][j][k]; r3 = rhs[2][i][j][k]; r4 = rhs[3][i][j][k]; r5 = rhs[4][i][j][k]; t1 = bt * r3; t2 = 0.5 * ( r4 + r5 ); rhs[0][i][j][k] = -r2; rhs[1][i][j][k] = r1; rhs[2][i][j][k] = bt * ( r4 - r5 ); rhs[3][i][j][k] = -t1 + t2; rhs[4][i][j][k] = t1 + t2; } } }<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/final-preproc/sp-b.c
#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)
100
} } } static void pinvr(void) { int i, j, k; double r1, r2, r3, r4, r5, t1, t2; <LOOP-START>for (i = 1; i <= grid_points[0]-2; i++) { for (j = 1; j <= grid_points[1]-2; j++) { for (k = 1; k <= grid_points[2]-2; k++) { r1 = rhs[0][i][j][k]; r2 = rhs[1][i][j][k]; r3 = rhs[2][i][j][k]; r4 = rhs[3][i][j][k]; r5 = rhs[4][i][j][k]; t1 = bt * r1; t2 = 0.5 * ( r4 + r5 ); rhs[0][i][j][k] = bt * ( r4 - r5 ); rhs[1][i][j][k] = -r3; rhs[2][i][j][k] = r2; rhs[3][i][j][k] = -t1 + t2; rhs[4][i][j][k] = t1 + t2; } } }<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)<OMP-END>
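The two sp-b.c records (ninvr and pinvr from NAS SP) parallelize only the outer i loop of a triple nest; j, k and the r1..r5, t1, t2 temporaries are listed private so each thread carries its own copies, and every (i,j,k) cell of rhs is updated independently. A reduced, self-contained sketch of the pinvr variant, assuming NX/NY/NZ in place of grid_points and bt = sqrt(0.5) as SP's set_constants() defines it:

#define NX 64
#define NY 64
#define NZ 64

static double rhs[5][NX][NY][NZ];
static const double bt = 0.70710678118654752;  /* assumed: sqrt(0.5), per SP */

/* Sketch of the pinvr-style block-diagonal inversion: each (i,j,k) cell of
 * rhs is rewritten independently, so only the outer loop is parallelized. */
void pinvr_sketch(void)
{
    int i, j, k;
    double r1, r2, r3, r4, r5, t1, t2;

#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)
    for (i = 1; i <= NX - 2; i++) {
        for (j = 1; j <= NY - 2; j++) {
            for (k = 1; k <= NZ - 2; k++) {
                r1 = rhs[0][i][j][k];
                r2 = rhs[1][i][j][k];
                r3 = rhs[2][i][j][k];
                r4 = rhs[3][i][j][k];
                r5 = rhs[4][i][j][k];
                t1 = bt * r1;
                t2 = 0.5 * (r4 + r5);
                rhs[0][i][j][k] = bt * (r4 - r5);
                rhs[1][i][j][k] = -r3;
                rhs[2][i][j][k] = r2;
                rhs[3][i][j][k] = -t1 + t2;
                rhs[4][i][j][k] = t1 + t2;
            }
        }
    }
}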
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c
#pragma omp parallel for schedule(SCHEDULING_METHOD)
100
* * @g: the graph */ void initialize_graph(graph* g) { DEBUG("initializing the graph\n"); <LOOP-START>for (int i = 0; i < g->N; i++) { node* u = elem_at(&g->vertices, i); payload* u_data = malloc(sizeof(payload)); u->data = u_data; u_data->fragment_id = u->label; u_data->tmp_fragment_id = u->label; u_data->received_first_message = 0; u_data->b = NULL; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
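The imsuite mst.c records schedule their loops with `schedule(SCHEDULING_METHOD)`, a macro the build resolves to static, dynamic, or guided (OpenMP expands macros inside the directive, so this is well defined). Vertex initialization is embarrassingly parallel because each iteration allocates and writes only its own payload. A sketch with simplified stand-ins for the node/payload types and for elem_at:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the imsuite node/payload types. */
typedef struct { int fragment_id; int tmp_fragment_id; int received_first_message; } payload;
typedef struct { int label; payload *data; } node;

#ifndef SCHEDULING_METHOD
#define SCHEDULING_METHOD static   /* the build picks static, dynamic, or guided */
#endif

void initialize_vertices(node *vertices, int n)
{
    int i;

    /* Each iteration allocates and writes only vertices[i], so no data is
     * shared between iterations; only the schedule choice matters. */
#pragma omp parallel for schedule(SCHEDULING_METHOD)
    for (i = 0; i < n; i++) {
        payload *p = malloc(sizeof *p);
        if (p == NULL) { fprintf(stderr, "out of memory\n"); exit(1); }
        p->fragment_id = vertices[i].label;
        p->tmp_fragment_id = vertices[i].label;
        p->received_first_message = 0;
        vertices[i].data = p;
    }
}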
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c
#pragma omp parallel for schedule(SCHEDULING_METHOD)
100
ueuelist* msgs, queuelist* tmp_msgs, queuelist* blues) { DEBUG("planting root messages\n"); <LOOP-START>for (int i = 0; i < g->N; i++) { node* u = elem_at(&g->vertices, i); payload* u_data = u->data; u_data->received_first_message = 0; /* Only roots find the blue edge */ if (u_data->fragment_id != u->label) continue; message m = {-1}; enqueue(msgs, u->label, &m); }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c
#pragma omp parallel for schedule(SCHEDULING_METHOD)
100
while (nodes_yet_to_recv) { DEBUG("propagating the messages across the graph\n"); <LOOP-START>for (int i = 0; i < g->N; i++) { node* u = elem_at(&g->vertices, i); payload* u_data = u->data; if (u_data->received_first_message) continue; while (!is_ql_queue_empty(msgs, u->label)) { u_data->received_first_message = 1; message* m = dequeue(msgs, u->label); for (int j = 0; j < u->degree; j++) { node* v = *((node**) elem_at(&u->neighbors, j)); payload* v_data = v->data; /* Don't send the message back to the source */ if (v->label == m->from) continue; /** * If the neighbor is outside the fragment it's a potential * blue edge. Otherwise it's just a carrier for this message. */ if (v_data->fragment_id != u_data->fragment_id) { edge b = {u->label, v->label, g->adj_mat[u->label][v->label]}; enqueue(blues, u_data->fragment_id, &b); } else { message mx = {u->label}; enqueue(tmp_msgs, v->label, &mx); } } } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
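The plant/propagate/move records implement a level-synchronous message wave: each thread drains the msgs queue of its own vertex, posts follow-up messages into tmp_msgs for other vertices, and a later loop moves tmp_msgs back into msgs; the queuelist type is assumed to make the cross-thread enqueue safe. A sketch of that two-phase pattern with per-vertex counters instead of queues and an atomic standing in for the concurrent enqueue (all names here are stand-ins, and schedule(dynamic) replaces the SCHEDULING_METHOD macro):

#define N 8

/* Two-phase message wave: consume this round's inbox for your own vertex,
 * post to neighbours' next-round inboxes.  An atomic stands in for the
 * thread-safe enqueue the original's queuelist presumably provides. */
void propagate_round(const int adj[N][N], int inbox[N],
                     int next_inbox[N], int received[N])
{
    int i, j;

#pragma omp parallel for private(j) schedule(dynamic)
    for (i = 0; i < N; i++) {
        if (inbox[i] == 0 || received[i])
            continue;
        received[i] = 1;                 /* only iteration i writes element i */
        for (j = 0; j < N; j++) {
            if (!adj[i][j])
                continue;
            /* several vertices may message the same neighbour j */
#pragma omp atomic
            next_inbox[j] += 1;
        }
    }
}

A second loop, analogous to the "moving messages" record, would copy next_inbox into inbox and clear it before the next round.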
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c
#pragma omp parallel for schedule(SCHEDULING_METHOD)
100
} } } DEBUG("moving messages from tmp_msgs to msgs\n"); <LOOP-START>for (int i = 0; i < g->N; i++) { node* u = elem_at(&g->vertices, i); payload* u_data = u->data; while (!is_ql_queue_empty(tmp_msgs, u->label)) { message* m = dequeue(tmp_msgs, u->label); if (!u_data->received_first_message) enqueue(msgs, u->label, m); } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c
#pragma omp parallel for schedule(SCHEDULING_METHOD)
100
_yet_to_recv = 0; DEBUG("checking if there are any more nodes left to process\n"); <LOOP-START>for (int i = 0; i < g->N; i++) { node* u = elem_at(&g->vertices, i); payload* u_data = u->data; if (!u_data->received_first_message) nodes_yet_to_recv = 1; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
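In the last record, any iteration that finds an unvisited vertex writes 1 into the shared nodes_yet_to_recv flag; all writers store the same value, but the unsynchronized write can be avoided entirely with an OR reduction. A sketch of that alternative (the reduction is a substitution, not what mst.c does):

#define N 8

int any_vertex_unvisited(const int received[N])
{
    int i;
    int pending = 0;

    /* reduction(|:pending) ORs each thread's private flag into the result,
     * replacing the shared write the record performs. */
#pragma omp parallel for reduction(|:pending)
    for (i = 0; i < N; i++) {
        if (!received[i])
            pending |= 1;
    }
    return pending;
}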