Dataset columns (each record below is listed as filename, then omp_pragma_line, then context_chars, then text):
  filename         string  lengths 78 to 241
  omp_pragma_line  string  lengths 24 to 416
  context_chars    int64   always 100
  text             string  lengths 152 to 177k
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
enerate final results //--------------------------------------------------------------------- //<LOOP-START>for (j = 1; j < nrows; j++) { nzloc[j] = nzloc[j] + nzloc[j-1]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for, no speed up *************VERIFICATION FAILED WITH THIS ONE*********************
100
omp parallel for for (j = 1; j < nrows; j++) { nzloc[j] = nzloc[j] + nzloc[j-1]; } //<LOOP-START>for (j = 0; j < nrows; j++) { if (j > 0) { j1 = rowstr[j] - nzloc[j-1]; } else { j1 = 0; } j2 = rowstr[j+1] - nzloc[j]; nza = rowstr[j]; for (k = j1; k < j2; k++) { a[k] = a[nza]; colidx[k] = colidx[nza]; nza = nza + 1; } }<LOOP-END> <OMP-START>#pragma omp parallel for, no speed up *************VERIFICATION FAILED WITH THIS ONE*********************<OMP-END>
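Note: the "no speed up / verification failed" annotation above is expected, because the loop it wraps carries a dependency (nzloc[j] needs the already-updated nzloc[j-1]), so a plain `#pragma omp parallel for` changes the result. A minimal sketch of the same prefix-sum pattern done safely with the OpenMP 5.0 scan reduction; the function name and the serial fallback are illustrative, not the NPB CG code:

```c
/* In-place inclusive prefix sum over nzloc[0..nrows-1]; equivalent to the
 * serial loop above. The scan form needs an OpenMP 5.0+ compiler. */
void prefix_sum(int *nzloc, int nrows)
{
#if defined(_OPENMP) && _OPENMP >= 201811
    int running = 0;
    #pragma omp parallel for simd reduction(inscan, +: running)
    for (int j = 0; j < nrows; j++) {
        running += nzloc[j];
        #pragma omp scan inclusive(running)
        nzloc[j] = running;
    }
#else
    for (int j = 1; j < nrows; j++)
        nzloc[j] = nzloc[j] + nzloc[j - 1];
#endif
}
```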
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
k < j2; k++) { a[k] = a[nza]; colidx[k] = colidx[nza]; nza = nza + 1; } } <LOOP-START>for (j = 1; j < nrows+1; j++) { rowstr[j] = rowstr[j] - nzloc[j-1]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for,*************** error openmp cannot be used with break statement******************
100
---------------------------------------------------------------- logical was_gen = false; //<LOOP-START>for (ii = 0; ii < nzv; ii++) { if (iv[ii] == i) { was_gen = true; break; } }<LOOP-END> <OMP-START>#pragma omp parallel for,*************** error openmp cannot be used with break statement******************<OMP-END>
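Note: the annotation above records a real restriction, since the associated loop of a worksharing construct may not be left with `break`; every iteration has to be independently schedulable. A common rewrite folds the early exit into a flag reduction, as sketched below with the snippet's names (iv, nzv, i) taken as assumptions about the surrounding code; on OpenMP 4.0+ `#pragma omp cancel for` with cancellation enabled is another option. The flag version scans the whole array, so it only pays off when nzv is large.

```c
/* Break-free membership test: every iteration runs and the hits are
 * OR-reduced into the flag. */
int was_gen = 0;
#pragma omp parallel for reduction(||: was_gen)
for (int ii = 0; ii < nzv; ii++)
    was_gen = was_gen || (iv[ii] == i);
```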
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
ouble v[], int iv[], int *nzv, int i, double val) { int k; logical set; set = false; { <LOOP-START>for (k = 0; k < *nzv; k++) { if (iv[k] == i) { v[k] = val; set = true; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/minar09/parallel-computing/OpenMP/gs_openmp.c
#pragma omp parallel for num_threads(num_ths) schedule(static, max_cells_per_th) collapse(2) reduction(+:diff)
100
w nor the last row are solved // (that's why both 'i' and 'j' start at 1 and go up to '[nm]-1') <LOOP-START>for (int i = 1; i < n-1; i++) { for (int j = 1; j < m-1; j++) { const int pos = (i * m) + j; const float temp = (*mat)[pos]; (*mat)[pos] = 0.25f * ( (*mat)[pos] + (*mat)[pos - 1] + (*mat)[pos - n] + (*mat)[pos + 1] + (*mat)[pos + n] ); diff += abs((*mat)[pos] - temp); } }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(num_ths) schedule(static, max_cells_per_th) collapse(2) reduction(+:diff)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/minar09/parallel-computing/OpenMP/jacobi_openmp.c
#pragma omp parallel for num_threads(num_ths) schedule(static, max_cells_per_th) collapse(2) reduction(+:diff)
100
w nor the last row are solved // (that's why both 'i' and 'j' start at 1 and go up to '[nm]-1') <LOOP-START>for (int i = 1; i < n-1; i++) { for (int j = 1; j < m-1; j++) { const int pos = (i * m) + j; temp[i][j] = 0.25f * ( (*mat)[pos] + (*mat)[pos - 1] + (*mat)[pos - n] + (*mat)[pos + 1] + (*mat)[pos + n] ); diff += abs((*mat)[pos] - temp[i][j]); } }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(num_ths) schedule(static, max_cells_per_th) collapse(2) reduction(+:diff)<OMP-END>
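Note on the two records above: the in-place update in gs_openmp.c means that, under collapse(2), a neighbour may or may not already be updated when it is read, while the jacobi_openmp.c variant writes into a separate temp and is race-free; also, C's `abs()` truncates the float residual to int (`fabsf()` is the float form), and with `pos = i*m + j` the vertical neighbours sit at `pos ± m`, which coincides with the `pos ± n` used here only when n == m. A minimal race-free Jacobi-style sketch under those assumptions (the flat `cur`/`next` buffers and the classic four-point stencil are illustrative, not the repo's exact formula):

```c
#include <math.h>

/* One Jacobi-style sweep on an n x m row-major grid: reads cur, writes next,
 * so every (i, j) iteration is independent and collapse(2) with the
 * reduction on diff is safe. fabsf keeps the residual in float. */
float jacobi_sweep(const float *cur, float *next, int n, int m)
{
    float diff = 0.0f;
    #pragma omp parallel for collapse(2) reduction(+:diff)
    for (int i = 1; i < n - 1; i++) {
        for (int j = 1; j < m - 1; j++) {
            const int pos = i * m + j;
            next[pos] = 0.25f * (cur[pos - 1] + cur[pos + 1] +
                                 cur[pos - m] + cur[pos + m]);
            diff += fabsf(next[pos] - cur[pos]);
        }
    }
    return diff;
}
```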
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/dkarageo/Distributed_KNN/source/knn.c
#pragma omp parallel for
100
every point in points matrix, find its kNNs in data matrix and // store them into results. <LOOP-START>for (int p = 0; p < pointc; p++) { // Calculate the k nearest neighbors for the current point, by // searching on all the available data. for (int d = 0; d < matrix_get_rows(data); d++) { // Calculate the euclidian distance between a queried point and // a data point. double dist = 0.0; for (int i = 0; i < matrix_get_cols(points); i++) { dist += pow(matrix_get_cell(points, p, i) - matrix_get_cell(data, d, i), 2.0); } dist = pow(dist, 0.5); // On first k data, just fill the k positions in results array. if (d < k) { results[p][d].distance = dist; results[p][d].index = i_offset + d; // When last position in results array gets filled, sort data. if (d == k-1) { qsort(results[p], k, sizeof(struct KNN_Pair), KNN_Pair_asc_comp); } } // Every row in results is initialized and sorted. So if // current distance is lesser than the distance of the last nearest // neighbor, previous value is replaced by current one. else if (dist < results[p][k-1].distance) { results[p][k-1].distance = dist; // Keep track on the index. i_offset is used as the base for // all indexes. results[p][k-1].index = i_offset + d; qsort(results[p], k, sizeof(struct KNN_Pair), KNN_Pair_asc_comp); } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/envelope-project/laik/examples/spmv2.c
#pragma omp parallel for schedule(dynamic,50)
100
dexing, from 0) laik_get_map_1d(resD, rangeNo, (void**) &res, &rcount); #ifdef _OPENMP <LOOP-START>for(int64_t r = fromRow; r < toRow; r++) { res[r - fromRow] = 0.0; for(int o = m->row[r]; o < m->row[r+1]; o++) res[r - fromRow] += m->val[o] * inp[m->col[o]]; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic,50)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/OpenMP/examples_sections/01_sections_nested.c
#pragma omp parallel for reduction(+:myresult)
100
ons reduction(+:result) { #pragma omp section { double myresult = 0; <LOOP-START>for( int jj = 0; jj < N; jj++ ) myresult += heavy_work_0( array[jj] ); result += myresult; } #pragma omp section { double myresult = 0; #pragma omp parallel for reduction(+:myresult) for( int jj = 0; jj < N; jj++ ) myresult += heavy_work_1( array[jj] ); result += myresult; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:myresult)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/OpenMP/examples_sections/01_sections_nested.c
#pragma omp parallel for reduction(+:myresult)
100
ult += myresult; } #pragma omp section { double myresult = 0; <LOOP-START>for( int jj = 0; jj < N; jj++ ) myresult += heavy_work_1( array[jj] ); result += myresult; } #pragma omp section { double myresult = 0; #pragma omp parallel for reduction(+:myresult) for( int jj = 0; jj < N; jj++ ) myresult += heavy_work_2( array[jj] ); result += myresult; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:myresult)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/OpenMP/examples_sections/01_sections_nested.c
#pragma omp parallel for reduction(+:myresult)
100
ult += myresult; } #pragma omp section { double myresult = 0; <LOOP-START>for( int jj = 0; jj < N; jj++ ) myresult += heavy_work_2( array[jj] ); result += myresult; } } } double tend = CPU_TIME; /* ----------------------------------------------------------------------------- * finalize * ----------------------------------------------------------------------------- */ free(array); printf("The result is %g\nrun took %g of wall-clock time\n\n", result, tend - tstart ); return 0; } double heavy_work_0( uint N ) { double guess = 3.141572 / 3; for( int i = 0; i < N; i++ ) { guess = exp( guess ); guess = sin( guess ); } return guess; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:myresult)<OMP-END>
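Note: the three records above come from one example in which each `section` runs its own nested `#pragma omp parallel for reduction(+:myresult)` and then folds `myresult` into the sections-level `reduction(+:result)`. The inner `parallel for` only fans out if nested parallelism is allowed. A compact self-contained sketch of that shape, with heavy_work0/1, N and the fill pattern standing in for the example's own definitions:

```c
#include <omp.h>
#include <stdio.h>

static double heavy_work0(double x) { return x * 2.0; }
static double heavy_work1(double x) { return x + 1.0; }

int main(void)
{
    enum { N = 1000 };
    double array[N], result = 0.0;
    for (int i = 0; i < N; i++) array[i] = (double)i;

    omp_set_max_active_levels(2);   /* let the inner parallel for really nest */

    #pragma omp parallel sections reduction(+:result) num_threads(2)
    {
        #pragma omp section
        {
            double my = 0.0;
            #pragma omp parallel for reduction(+:my)
            for (int j = 0; j < N; j++) my += heavy_work0(array[j]);
            result += my;
        }
        #pragma omp section
        {
            double my = 0.0;
            #pragma omp parallel for reduction(+:my)
            for (int j = 0; j < N; j++) my += heavy_work1(array[j]);
            result += my;
        }
    }
    printf("result = %g\n", result);
    return 0;
}
```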
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/OpenMP/threads_affinity/06_touch_by_all.c
#pragma omp parallel for
100
as // the parallel for has the // scheduling as the final one double _tstart = CPU_TIME_th; <LOOP-START>for ( int ii = 0; ii < N; ii++ ) array[ii] = (double)ii; double _tend = CPU_TIME_th; printf("init takes %g\n", _tend - _tstart); /* ----------------------------------------------------------------------------- * calculate * ----------------------------------------------------------------------------- */ double S = 0; // this will store the summation double th_avg_time = 0; // this will be the average thread runtime double th_min_time = 1e11; // this will be the min thread runtime. // contrasting the average and the min // time taken by the threads, you may // have an idea of the unbalance. double tstart = CPU_TIME; #if !defined(_OPENMP) for ( int ii = 0; ii < N; ii++ ) // well, you may notice this implementation S += array[ii]; // is particularly inefficient anyway #else #pragma omp parallel reduction(+:th_avg_time) \ reduction(min:th_min_time) // in this region there are 2 different { // reductions: the one of runtime, which struct timespec myts; // happens in the whole parallel region; double mystart = CPU_TIME_th; // and the one on S, which takes place #pragma omp for reduction(+:S) // in the for loop. for ( int ii = 0; ii < N; ii++ ) S += array[ii]; th_avg_time = CPU_TIME_th - mystart; th_min_time = CPU_TIME_th - mystart; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream-mapi.c
#pragma omp parallel for
100
Number of Threads requested = %i\n",k); } #endif /* Get initial value for system clock. */ <LOOP-START>for (j=0; j<N; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream-mapi.c
#pragma omp parallel for
100
("Your clock granularity appears to be " "less than one microsecond.\n"); t = mysecond(); <LOOP-START>for (j = 0; j < N; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else #pragma omp parallel { LIKWID_MARKER_START("COPY"); #pragma omp for for (j=0; j<N; j++) c[j] = a[j]; LIKWID_MARKER_STOP("COPY"); } times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #pragma omp parallel { LIKWID_MARKER_START("SCALE"); #pragma omp for for (j=0; j<N; j++) b[j] = scalar*c[j]; LIKWID_MARKER_STOP("SCALE"); } times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel { LIKWID_MARKER_START("ADD"); #pragma omp for for (j=0; j<N; j++) c[j] = a[j]+b[j]; LIKWID_MARKER_STOP("ADD"); } times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel { LIKWID_MARKER_START("TRIAD"); #pragma omp for for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j]; LIKWID_MARKER_STOP("TRIAD"); } times[3][k] = mysecond() - times[3][k]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream-mapi.c
#pragma omp parallel for
100
f \n",csum); } else { printf ("Solution Validates\n"); } } void tuned_STREAM_Copy() { int j; <LOOP-START>for (j=0; j<N; j++) c[j] = a[j]; } void tuned_STREAM_Scale(double scalar) { int j; #pragma omp parallel for for (j=0; j<N; j++) b[j] = scalar*c[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream-mapi.c
#pragma omp parallel for
100
for (j=0; j<N; j++) c[j] = a[j]; } void tuned_STREAM_Scale(double scalar) { int j; <LOOP-START>for (j=0; j<N; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { int j; #pragma omp parallel for for (j=0; j<N; j++) c[j] = a[j]+b[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream-mapi.c
#pragma omp parallel for
100
omp parallel for for (j=0; j<N; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { int j; <LOOP-START>for (j=0; j<N; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(double scalar) { int j; #pragma omp parallel for for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c
#pragma omp parallel for
100
mber of Threads requested = %i\n",k); } #endif /* Get initial value for system clock. */ //<LOOP-START>for (j=0; j<N; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c
#pragma omp parallel for
100
("Your clock granularity appears to be " "less than one microsecond.\n"); t = mysecond(); <LOOP-START>for (j = 0; j < N; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else #pragma omp parallel for for (j=0; j<N; j++) c[j] = a[j]; times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #pragma omp parallel for for (j=0; j<N; j++) b[j] = scalar*c[j]; times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel for for (j=0; j<N; j++) c[j] = a[j]+b[j]; times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j]; times[3][k] = mysecond() - times[3][k]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c
#pragma omp parallel for
100
(k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else <LOOP-START>for (j=0; j<N; j++) c[j] = a[j]; times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #pragma omp parallel for for (j=0; j<N; j++) b[j] = scalar*c[j]; times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel for for (j=0; j<N; j++) c[j] = a[j]+b[j]; times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j]; times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c
#pragma omp parallel for
100
- times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else <LOOP-START>for (j=0; j<N; j++) b[j] = scalar*c[j]; times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel for for (j=0; j<N; j++) c[j] = a[j]+b[j]; times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j]; times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c
#pragma omp parallel for
100
second() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else <LOOP-START>for (j=0; j<N; j++) c[j] = a[j]+b[j]; times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j]; times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c
#pragma omp parallel for
100
- times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else <LOOP-START>for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j]; times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c
#pragma omp parallel for
100
f \n",csum); } else { printf ("Solution Validates\n"); } } void tuned_STREAM_Copy() { int j; <LOOP-START>for (j=0; j<N; j++) c[j] = a[j]; } void tuned_STREAM_Scale(double scalar) { int j; #pragma omp parallel for for (j=0; j<N; j++) b[j] = scalar*c[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c
#pragma omp parallel for
100
for (j=0; j<N; j++) c[j] = a[j]; } void tuned_STREAM_Scale(double scalar) { int j; <LOOP-START>for (j=0; j<N; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { int j; #pragma omp parallel for for (j=0; j<N; j++) c[j] = a[j]+b[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Foundations-of-HPC/Foundations_of_HPC_2021/Libraries-Benchmarking/likwid/stream.c
#pragma omp parallel for
100
omp parallel for for (j=0; j<N; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { int j; <LOOP-START>for (j=0; j<N; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(double scalar) { int j; #pragma omp parallel for for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/berkerdemirel/Parallel-Breadth-First-Search-OpenMP-and-CUDA/CPU/hybrid.cpp
#pragma omp parallel for reduction(+:nf) reduction(||:improvement) schedule(guided, 32)
100
*distance, int &level, int nov, int *unvisited, int uvSize) { bool improvement = false; nf = 0; <LOOP-START>for (int i = 0; i < uvSize; i++) { int v = unvisited[i]; if (distance[v] < 0) { for (int j = row_inv[v]; j < row_inv[v + 1]; j++) { int u = col_inv[j]; if (distance[u] == level) { distance[v] = level + 1; nf++; improvement = true; break; } } } }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:nf) reduction(||:improvement) schedule(guided, 32)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LucaAngioloni/ParallelComputingExam/Integral_Images_(final-term)/Code/OpenMP/openmp_integral_image.cpp
#pragma omp parallel for
100
ck(unsigned long *A, unsigned long *B, const int lda, const int ldb, const int block_size) { // <LOOP-START>// for(int i=0; i<block_size; i++) { // for(int j=0; j<block_size; j++) { // B[j*ldb + i] = A[i*lda +j]; // } // }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LucaAngioloni/ParallelComputingExam/Integral_Images_(final-term)/Code/OpenMP/openmp_integral_image.cpp
#pragma omp parallel for
100
ned long *B, const int n, const int m, const int lda, const int ldb, const int block_size) { // <LOOP-START>// for(int i=0; i<n; i+=block_size) { // for(int j=0; j<m; j+=block_size) { // transpose_scalar_block(&A[i*lda +j], &B[j*ldb + i], lda, ldb, block_size); // } // }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LucaAngioloni/ParallelComputingExam/Integral_Images_(final-term)/Code/OpenMP/openmp_integral_image.cpp
#pragma omp parallel for
100
} // } void transpose(unsigned long *src, unsigned long *dst, const int N, const int M) { <LOOP-START>for(int n = 0; n<N*M; n++) { int i = n/N; int j = n%N; dst[n] = src[M*j + i]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LucaAngioloni/ParallelComputingExam/Integral_Images_(final-term)/Code/OpenMP/openmp_integral_image.cpp
#pragma omp parallel for
100
for all consecutive parallel regions } // #pragma omp parallel // #pragma omp for <LOOP-START>for (int i = 0; i < n; ++i) { rows[i*m] = x[i*m]; for (int j = 1; j < m; ++j) { rows[i*m + j] = x[i*m + j] + rows[i*m + j - 1]; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LucaAngioloni/ParallelComputingExam/Integral_Images_(final-term)/Code/OpenMP/openmp_integral_image.cpp
#pragma omp parallel for
100
} else { // out[i*m + j] = x[i*m + j]; // } // } // } <LOOP-START>for (int i = 0; i < m; ++i) { rows[i*n] = out[i*n]; for (int j = 1; j < n; ++j) { rows[i*n + j] = out[i*n + j] + rows[i*n + j - 1]; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bader-Research/snap-graph/src/graph_kernels/vertex_cover.c
#pragma omp parallel for private(u,v,j,k)
100
memblock1 + 2*G->n; position_e = memblock1 + 2*(G->n + G->m); n = G->n; #ifdef _OPENMP <LOOP-START>for(i=0; i<n; i++) { wp_v[i] = G->dbl_weight_v[i]; degree_v[i] = G->numEdges[i+1] - G->numEdges[i]; visited_v[i] = 0; if(degree_v[i] == 0) visited_v[i]=1; for(j=G->numEdges[i]; j<G->numEdges[i+1]; j++) { u = i; v = G->endV[j]; delta_e[j] = 0; visited_e[j] = 0; if(v < u ) continue; /* we have already covered this case when we visited v. */ for (k=G->numEdges[v]; k<G->numEdges[v+1]; k++) { if(G->endV[k] == u) break; } position_e[j] = k; position_e[k] = j; } }<LOOP-END> <OMP-START>#pragma omp parallel for private(u,v,j,k)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bader-Research/snap-graph/src/graph_kernels/vertex_cover.c
#pragma omp parallel for shared(max,max_e,max_u,max_v) private(j,u,v)
100
edge_counter = 2*G->m; while(edge_counter > 0) { max = 0; #ifdef _OPENMP <LOOP-START>for(i=0; i<n; i++) { if(degree_v[i] == 0) continue; for(j=G->numEdges[i]; j<G->numEdges[i+1]; j++) { u = i; v = G->endV[j]; if(degree_v[u] + degree_v[v] > max) { max = degree_v[u]+ degree_v[v]; max_e = j; max_u = u; max_v = v; } } }<LOOP-END> <OMP-START>#pragma omp parallel for shared(max,max_e,max_u,max_v) private(j,u,v)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Bader-Research/snap-graph/src/graph_partitioning/modularity_spectral.c
#pragma omp parallel for shared(communitySize)
100
ices belong to this community and updating the vertex Vector accordingly. */ /* <LOOP-START>reduction(+:degreeSum) */ for(i=0; i<G->n; i++) { if(v2C[i] == curCommunity) { { communitySize++; vertex[communitySize] = i; } v2pos[i] = communitySize; degreeSum += G->numEdges[i+1]-G->numEdges[i]; } }<LOOP-END> <OMP-START>#pragma omp parallel for shared(communitySize) <OMP-END>
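Note: as excerpted, this record looks racy, since `communitySize++` followed by `vertex[communitySize] = i` runs under `parallel for shared(communitySize)` with nothing guarding the counter (the bare braces suggest a critical section that did not survive extraction, and the degree reduction is commented out). One way to claim an output slot safely is an atomic capture; the sketch below shows that pattern with illustrative stand-ins for the snippet's graph fields, and note that the order of members in vertex[] will differ from the serial order.

```c
/* Gather the members of one community, claiming output slots with an atomic
 * capture so the counter increment and the slot index stay consistent.
 * Starting communitySize at -1 (last used slot) is an assumption. */
long gather_community(const int *v2C, int curCommunity,
                      long n, long *vertex, long *v2pos)
{
    long communitySize = -1;
    #pragma omp parallel for
    for (long i = 0; i < n; i++) {
        if (v2C[i] == curCommunity) {
            long pos;
            #pragma omp atomic capture
            pos = ++communitySize;      /* increment and read the new value */
            vertex[pos] = i;
            v2pos[i] = pos;
        }
    }
    return communitySize + 1;           /* number of members gathered */
}
```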
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/visit-dav/visit/src/avt/Expressions/General/avtLambda2Expression.C
#pragma omp parallel for
100
, const InputType * gradY, const InputType * gradZ, double *lambda2, const int numTuples) { <LOOP-START>for (int i = 0; i < numTuples; ++i) { const int offset = 3*i; const double du[3] = { gradX[offset], gradX[offset+1], gradX[offset+2] }; const double dv[3] = { gradY[offset], gradY[offset+1], gradY[offset+2] }; const double dw[3] = { gradZ[offset], gradZ[offset+1], gradZ[offset+2] }; COMPUTE_LAMBDA2 lambda2[i] = lambda[1]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/visit-dav/visit/src/avt/Expressions/General/avtQCriterionExpression.C
#pragma omp parallel for
100
dX, const InputType * gradY, const InputType * gradZ, double *qCrit, const int numTuples) { <LOOP-START>for (int i = 0; i < numTuples; ++i) { const int offset = 3*i; const InputType du[3] = { gradX[offset], gradX[offset+1], gradX[offset+2] }; const InputType dv[3] = { gradY[offset], gradY[offset+1], gradY[offset+2] }; const InputType dw[3] = { gradZ[offset], gradZ[offset+1], gradZ[offset+2] }; COMPUTE_Q_CRIT(InputType, qCrit[i]); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/visit-dav/visit/src/avt/Expressions/General/avtMagnitudeExpression.C
#pragma omp parallel for
100
d ompCalculateMagnitude(const ArrayType *vectorIn, ArrayType *scalarOut, const int numTuples) { <LOOP-START>for (vtkIdType i = 0; i < numTuples ; ++i) { const vtkIdType idx = 3*i; scalarOut[i] = sqrt((double)vectorIn[idx+0]*(double)vectorIn[idx+0]+ (double)vectorIn[idx+1]*(double)vectorIn[idx+1]+ (double)vectorIn[idx+2]*(double)vectorIn[idx+2]); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/visit-dav/visit/src/avt/Expressions/General/avtGradientExpression.C
#pragma omp parallel for private(i,j,k) shared(in,out)
100
} } else { #ifdef _OPENMP #pragma message("Compiling for OpenMP.") #endif <LOOP-START>for (k = 0 ; k < dims2 ; k++) { for (j = 0 ; j < dims1 ; j++) { for (i = 0 ; i < dims0 ; i++) { int index = k*kskip + j*jskip + i*iskip; int vec_index = 3*index; float *pt1 = pts + 3*(index+iskip); float *pt2 = pts + 3*(index-iskip); if ((i > 0) && (i < (dims0-1))) out[vec_index] = in[index+iskip]-in[index-iskip]; else if (i == 0) { pt2 = pts + 3*index; out[vec_index] = in[index+iskip] - in[index]; } else // i == dims0-1 { pt1 = pts + 3*index; out[vec_index] = in[index] - in[index-iskip]; } float diff[3]; diff[0] = pt1[0] - pt2[0]; diff[1] = pt1[1] - pt2[1]; diff[2] = pt1[2] - pt2[2]; float dist = sqrt(diff[0]*diff[0] + diff[1]*diff[1] + diff[2]*diff[2]); if (dist == 0.) out[vec_index++] = 0.; else out[vec_index++] /= dist; pt1 = pts + 3*(index+jskip); pt2 = pts + 3*(index-jskip); if ((j > 0) && (j < (dims1-1))) out[vec_index] = in[index+jskip] - in[index-jskip]; else if (j == 0) { pt2 = pts + 3*index; out[vec_index] = in[index+jskip] - in[index]; } else // j == dims1-1 { pt1 = pts + 3*index; out[vec_index] = in[index] - in[index-jskip]; } diff[0] = pt1[0] - pt2[0]; diff[1] = pt1[1] - pt2[1]; diff[2] = pt1[2] - pt2[2]; dist = sqrt(diff[0]*diff[0] + diff[1]*diff[1] + diff[2]*diff[2]); if (dist == 0.) out[vec_index++] = 0.; else out[vec_index++] /= dist; pt1 = pts + 3*(index+kskip); pt2 = pts + 3*(index-kskip); if ((k > 0) && (k < (dims2-1))) out[vec_index] = in[index+kskip] - in[index-kskip]; else if (k == 0) { pt2 = pts + 3*index; out[vec_index] = in[index+kskip] - in[index]; } else // k == dims2-1 { pt1 = pts + 3*index; out[vec_index] = in[index] - in[index-kskip]; } diff[0] = pt1[0] - pt2[0]; diff[1] = pt1[1] - pt2[1]; diff[2] = pt1[2] - pt2[2]; dist = sqrt(diff[0]*diff[0] + diff[1]*diff[1] + diff[2]*diff[2]); if (dist == 0.) out[vec_index++] = 0.; else out[vec_index++] /= dist; } } }<LOOP-END> <OMP-START>#pragma omp parallel for private(i,j,k) shared(in,out)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NumPower/numpower/src/dnn.c
#pragma omp parallel for
100
float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; <LOOP-START>for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[i*lda+k]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NumPower/numpower/src/dnn.c
#pragma omp parallel for
100
float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; <LOOP-START>for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ register float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i*lda+k]*B[j*ldb + k]; } C[i*ldc+j] += sum; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NumPower/numpower/src/dnn.c
#pragma omp parallel for
100
float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; <LOOP-START>for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[k*lda+i]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/NumPower/numpower/src/dnn.c
#pragma omp parallel for
100
float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; <LOOP-START>for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ register float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i+k*lda]*B[k+j*ldb]; } C[i*ldc+j] += sum; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WitsHPC/HPC-InterestGroup/assorted/selection/2024_selection_project/bitonic_sort/src/bitonic_omp.cpp
#pragma omp parallel for private(increasing)
100
itr = (size_t)log2((double)size); bool increasing; for (size_t i = 0; i < itr; ++i) { <LOOP-START>for (size_t j = 0; j < size; j += groupSize) { increasing = ((j / groupSize) % 2 == 0); bitonicMerge(numbers, left + j, groupSize, increasing); }<LOOP-END> <OMP-START>#pragma omp parallel for private(increasing)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WitsHPC/HPC-InterestGroup/talks/programming/parallel_programming/07_optimisation/optim_sols/openmp_optim.c
#pragma omp parallel for
100
} int i = 0; for (int step=0; step < n; ++step){ // update the new array <LOOP-START>for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { int N = neighbours(x, y, w, h, cells); int idx = y * w + x; bool is_on = cells[idx]; bool new_val = 0; if (is_on){ if (N < A || N > B) new_val = 0; else new_val = 1; }else{ if (N == C) new_val = 1; } buffer[idx] = new_val; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WitsHPC/HPC-InterestGroup/talks/programming/parallel_programming/01_omp/04_array_sum.cpp
#pragma omp parallel for reduction(+:global_total)
100
0; // the reduction syntax is something like (operation:variable), and openmp handles most things. <LOOP-START>for (int i=0; i < N; ++i){ global_total += v[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:global_total)<OMP-END>
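Note: the comment in this record ("the reduction syntax is something like (operation:variable)") is the whole idea: each thread accumulates into a private copy of the variable, and OpenMP combines the copies with the given operator when the loop ends. A complete minimal version of the same sum, where the array size and fill value are illustrative:

```c
/* Minimal parallel sum with reduction(+:total): every thread gets a private
 * total initialised to 0; the privates are summed into the shared total at
 * the end of the loop. Build with -fopenmp. */
#include <stdio.h>

int main(void)
{
    enum { N = 1 << 20 };
    static double v[N];
    for (int i = 0; i < N; i++) v[i] = 1.0;

    double total = 0.0;
    #pragma omp parallel for reduction(+:total)
    for (int i = 0; i < N; i++)
        total += v[i];

    printf("total = %g (expected %d)\n", total, N);
    return 0;
}
```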
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WitsHPC/HPC-InterestGroup/talks/programming/parallel_programming/02_omp_vectorised/02_pi.cpp
#pragma omp parallel for private(x) reduction(+ \
100
counter int i; // how many loop iterations should we do int n = ceil((1.0 - 0.0) / dx); <LOOP-START>: ans) for (i = 0; i < n; ++i) { // get x x = i * dx; // increment ans ans += dx * 4.0 / (1.0 + x * x); }<LOOP-END> <OMP-START>#pragma omp parallel for private(x) reduction(+ \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/WitsHPC/HPC-InterestGroup/talks/programming/parallel_programming/02_omp_vectorised/02_pi.cpp
#pragma omp parallel for simd reduction(+ \
100
ans = 0; double x; int i; int n = ceil(1.0 / dx); // only change -> add in parallel for <LOOP-START>: ans) // loop is the same for (i = 0; i < n; ++i) { x = i * dx; ans += dx * 4.0 / (1.0 + x * x); }<LOOP-END> <OMP-START>#pragma omp parallel for simd reduction(+ \<OMP-END>
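Note: both pi records above are a Riemann-sum estimate of pi as the integral of 4/(1+x^2) over [0,1]; the pragma is split over a backslash continuation, which is why the reduction clause's trailing ": ans)" shows up at the start of the text field. A self-contained version of the simd variant, with dx chosen here only for illustration; declaring x inside the loop body also removes the need for the private(x) clause used in the first record.

```c
/* Riemann-sum estimate of pi = integral of 4/(1+x^2) on [0,1].
 * dx is an illustrative step size, not the value used in the talk. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double dx = 1e-7;
    const int n = (int)ceil(1.0 / dx);
    double ans = 0.0;

    #pragma omp parallel for simd reduction(+:ans)
    for (int i = 0; i < n; i++) {
        const double x = i * dx;            /* private by construction */
        ans += dx * 4.0 / (1.0 + x * x);
    }

    printf("pi ~= %.10f\n", ans);
    return 0;
}
```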
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/balos1/Shi_Tomasi_Feature_Detection/openmp/stfd.c
#pragma omp parallel for private(j)
100
e_width, int windowsize, data_wrapper_t *eigenvalues) { int w = floor(windowsize/2); int i, j; <LOOP-START>for (i = 0; i < image_height; i++) { for (j = 0; j < image_width; j++) { float ixx_sum = 0; float iyy_sum = 0; float ixiy_sum = 0; for (int k = 0; k < windowsize; k++) { for (int m = 0; m < windowsize; m++) { int offseti = -1 * w + k; int offsetj = -1 * w + m; if (i+offseti >= 0 && i+offseti < image_height && j + offsetj >= 0 && j+offsetj < image_width){ ixx_sum += hgrad[(i +offseti) * image_width + (j + offsetj)] * hgrad[(i +offseti) * image_width + (j + offsetj)]; iyy_sum += vgrad[(i +offseti) * image_width + (j + offsetj)] * vgrad[(i +offseti) * image_width + (j + offsetj)]; ixiy_sum += hgrad[(i +offseti) * image_width + (j + offsetj)] * vgrad[(i +offseti) * image_width + (j + offsetj)]; } } } eigenvalues[i*image_width+j].x = i; eigenvalues[i*image_width+j].y = j; eigenvalues[i*image_width+j].data = min_eigenvalue(ixx_sum, ixiy_sum, ixiy_sum, iyy_sum); } }<LOOP-END> <OMP-START>#pragma omp parallel for private(j)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/balos1/Shi_Tomasi_Feature_Detection/openmp/stfd.c
#pragma omp parallel for private(j)
100
e, int image_width, int image_height, int kernel_width, int kernel_height, int half) { int i, j; <LOOP-START>for (i = 0; i < image_height; i++) { for (j = 0; j < image_width; j++) { // reset accumulator when "focused" pixel changes float sum = 0.0; // for each item in the kernel for (int k = 0; k < kernel_height; k++) { for (int m = 0; m < kernel_width; m++) { int offseti = -1 * (kernel_height/2) + k; int offsetj = -1 * (kernel_width/2) + m; // Check to make sure we are in the bounds of the image. if (i+offseti >= 0 && i+offseti < image_height && j + offsetj >= 0 && j+offsetj < image_width) sum+=(float)(image[(i+offseti) * image_width + (j+offsetj)])*kernel[k*kernel_width +m]; } } resultimage[i * image_width + j] = sum; } }<LOOP-END> <OMP-START>#pragma omp parallel for private(j)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/joaomlneto/cpds-heat/omp/solver-omp.c
#pragma omp parallel for reduction(+:sum) private(diff)
100
eads();//NB; bx = sizex/nbx + ((sizex%nbx) ? 1 : 0);//sizex/nbx; nby = 1;//NB; by = sizey/nby; <LOOP-START>for (int ii=0; ii<nbx; ii++) { for (int jj=0; jj<nby; jj++) { for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++) { for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) { utmp[i*sizey+j]= 0.25 * (u[ i*sizey + (j-1) ]+ // left u[ i*sizey + (j+1) ]+ // right u[ (i-1)*sizey + j ]+ // top u[ (i+1)*sizey + j ]); // bottom diff = utmp[i*sizey+j] - u[i*sizey + j]; sum += diff * diff; } } } }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:sum) private(diff)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/joaomlneto/cpds-heat/omp/solver-omp.c
#pragma omp parallel for reduction(+:sum) private(diff, lsw)
100
= sizex/nbx + ((sizex%nbx) ? 1 : 0); nby = 1; by = sizey/nby; // */ // Computing "Red" blocks <LOOP-START>for (int ii=0; ii<nbx; ii++) { lsw = ii%2; for (int jj=lsw; jj<nby; jj=jj+2) { for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++) { for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) { unew= 0.25 * ( u[ i*sizey + (j-1) ]+ // left u[ i*sizey + (j+1) ]+ // right u[ (i-1)*sizey + j ]+ // top u[ (i+1)*sizey + j ]); // bottom diff = unew - u[i*sizey+ j]; sum += diff * diff; u[i*sizey+j]=unew; } } } }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:sum) private(diff, lsw)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/joaomlneto/cpds-heat/omp/solver-omp.c
#pragma omp parallel for reduction(+:sum) private(diff, lsw)
100
diff = unew - u[i*sizey+ j]; sum += diff * diff; u[i*sizey+j]=unew; } } } } <LOOP-START>// Computing "Black" blocks for (int ii=0; ii<nbx; ii++) { lsw = (ii+1)%2; for (int jj=lsw; jj<nby; jj=jj+2) { for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++) { for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) { unew= 0.25 * ( u[ i*sizey + (j-1) ]+ // left u[ i*sizey + (j+1) ]+ // right u[ (i-1)*sizey + j ]+ // top u[ (i+1)*sizey + j ]); // bottom diff = unew - u[i*sizey+ j]; sum += diff * diff; u[i*sizey+j]=unew; } } } }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:sum) private(diff, lsw)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/joaomlneto/cpds-heat/omp/solver-omp.c
#pragma omp parallel for reduction(+:sum) private(diff)
100
omp_get_max_threads(); bx = sizex/nbx + ((sizex%nbx) ? 1 : 0); nby = 1; by = sizey/nby; // */ <LOOP-START>for (int ii=0; ii<nbx; ii++) for (int jj=0; jj<nby; jj++) for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++) for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) { unew= 0.25 * ( u[ i*sizey + (j-1) ]+ // left u[ i*sizey + (j+1) ]+ // right u[ (i-1)*sizey + j ]+ // top u[ (i+1)*sizey + j ]); // bottom diff = unew - u[i*sizey+ j]; sum += diff * diff; u[i*sizey+j]=unew; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:sum) private(diff)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr58706.C
#pragma omp parallel for reduction (+: n)
100
706 // { dg-do run } // { dg-options "-std=c++11" } template <typename T> T foo () { T n = T (); <LOOP-START>for (T i = [](){ return 3; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction (+: n)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr58706.C
#pragma omp parallel for reduction (+: n)
100
return 3; }(); i < 10; ++i) n++; return n; } template <typename T> T bar () { T n = T (); <LOOP-START>for (T i = [](){ return 1; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction (+: n)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/pr58706.C
#pragma omp parallel for reduction (+: n)
100
return 4; }(); i < 10; ++i) n++; return n; } template <typename T> T baz () { T n = T (); <LOOP-START>for (T i = T (); i < [](){ return 7; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction (+: n)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/simd14.C
#pragma omp parallel for simd linear(r) linear(s:17ULL) linear(t:2)
100
e__((noinline, noclone)) int foo (unsigned long long &s, short *&t) { int i, j = 0; int &r = j; <LOOP-START>for (i = 0; i < 1024; i++) bar (r, s, t); return j; } int main () { int i; for (i = 0; i < 2048; i++) b[i] = 3 * i; unsigned long long s = 12; short *t = b; int j = foo (s, t); for (i = 0; i < 1024; i++) if (a[i] != 12 + 24 * i) __builtin_abort (); if (j != 1024 || s != 12 + 1024 * 17ULL || t != &b[2048]) __builtin_abort (); }<LOOP-END> <OMP-START>#pragma omp parallel for simd linear(r) linear(s:17ULL) linear(t:2)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-4.C
#pragma omp parallel for private (a) reduction(|:R::r)
100
a(e) {} Q a; int &b; void m1 (); }; int f[64]; template <typename Q> void A<Q>::m1 () { r = 0; <LOOP-START>for (a = 0; A::a < 31; a += 2) r |= (1 << A::a); if (r != 0x55555555) __builtin_abort (); #pragma omp parallel for simd linear (R::r) for (R::r = 0; r < 32; R::r++) f[r + 8] |= 1; for (int i = 0; i < 64; i++) if (f[i] != ((i >= 8 && i < 32 + 8) ? 1 : 0)) __builtin_abort (); #pragma omp parallel for lastprivate (T<Q>::t) for (T<Q>::t = 0; T<Q>::t < 32; T<Q>::t += 3) f[T<Q>::t + 2] |= 2; if (T<Q>::t != 33) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0))) __builtin_abort (); #pragma omp simd linear (T<Q>::t) for (T<Q>::t = 0; T<Q>::t < 32; T<Q>::t++) f[T<Q>::t + 9] |= 4; if (T<Q>::t != 32) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0))) __builtin_abort (); r = 0; #pragma omp parallel for reduction(|:r) for (a = 0; A::a < 31; a += 2) r |= (1 << A::a); if (r != 0x55555555) __builtin_abort (); #pragma omp parallel for simd for (R::r = 0; r < 32; R::r += 2) f[r + 8] |= 8; for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0))) __builtin_abort (); #pragma omp simd collapse(2) for (T<Q>::t = 0; T<Q>::t < 7; T<Q>::t += 2) for (a = 0; A::a < 8; a++) f[((T<Q>::t << 2) | a) + 3] |= 16; if (T<Q>::t != 8 || A::a != 8) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? 16 : 0))) __builtin_abort (); T<Q>::t = 32; a = 16; #pragma omp parallel #pragma omp single #pragma omp taskloop simd collapse(2) for (T<Q>::t = 0; T<Q>::t < 7; T<Q>::t += 2) for (A::a = 0; a < 8; A::a++) f[((T<Q>::t << 2) | A::a) + 3] |= 32; if (T<Q>::t != 8 || a != 8) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0))) __builtin_abort (); #pragma omp parallel #pragma omp single #pragma omp taskloop simd for (R::r = 0; r < 31; R::r += 2) f[r + 8] |= 64; if (r != 32) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0))) __builtin_abort (); } int main () { A<int> a; a.m1 (); }<LOOP-END> <OMP-START>#pragma omp parallel for private (a) reduction(|:R::r)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-4.C
#pragma omp parallel for simd linear (R::r)
100
r (a = 0; A::a < 31; a += 2) r |= (1 << A::a); if (r != 0x55555555) __builtin_abort (); <LOOP-START>for (R::r = 0; r < 32; R::r++) f[r + 8] |= 1; for (int i = 0; i < 64; i++) if (f[i] != ((i >= 8 && i < 32 + 8) ? 1 : 0)) __builtin_abort (); #pragma omp parallel for lastprivate (T<Q>::t) for (T<Q>::t = 0; T<Q>::t < 32; T<Q>::t += 3) f[T<Q>::t + 2] |= 2; if (T<Q>::t != 33) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0))) __builtin_abort (); #pragma omp simd linear (T<Q>::t) for (T<Q>::t = 0; T<Q>::t < 32; T<Q>::t++) f[T<Q>::t + 9] |= 4; if (T<Q>::t != 32) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0))) __builtin_abort (); r = 0; #pragma omp parallel for reduction(|:r) for (a = 0; A::a < 31; a += 2) r |= (1 << A::a); if (r != 0x55555555) __builtin_abort (); #pragma omp parallel for simd for (R::r = 0; r < 32; R::r += 2) f[r + 8] |= 8; for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0))) __builtin_abort (); #pragma omp simd collapse(2) for (T<Q>::t = 0; T<Q>::t < 7; T<Q>::t += 2) for (a = 0; A::a < 8; a++) f[((T<Q>::t << 2) | a) + 3] |= 16; if (T<Q>::t != 8 || A::a != 8) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? 16 : 0))) __builtin_abort (); T<Q>::t = 32; a = 16; #pragma omp parallel #pragma omp single #pragma omp taskloop simd collapse(2) for (T<Q>::t = 0; T<Q>::t < 7; T<Q>::t += 2) for (A::a = 0; a < 8; A::a++) f[((T<Q>::t << 2) | A::a) + 3] |= 32; if (T<Q>::t != 8 || a != 8) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0))) __builtin_abort (); #pragma omp parallel #pragma omp single #pragma omp taskloop simd for (R::r = 0; r < 31; R::r += 2) f[r + 8] |= 64; if (r != 32) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0))) __builtin_abort (); } int main () { A<int> a; a.m1 (); }<LOOP-END> <OMP-START>#pragma omp parallel for simd linear (R::r)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-4.C
#pragma omp parallel for lastprivate (T<Q>::t)
100
t i = 0; i < 64; i++) if (f[i] != ((i >= 8 && i < 32 + 8) ? 1 : 0)) __builtin_abort (); <LOOP-START>for (T<Q>::t = 0; T<Q>::t < 32; T<Q>::t += 3) f[T<Q>::t + 2] |= 2; if (T<Q>::t != 33) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0))) __builtin_abort (); #pragma omp simd linear (T<Q>::t) for (T<Q>::t = 0; T<Q>::t < 32; T<Q>::t++) f[T<Q>::t + 9] |= 4; if (T<Q>::t != 32) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? 1 : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0))) __builtin_abort (); r = 0; #pragma omp parallel for reduction(|:r) for (a = 0; A::a < 31; a += 2) r |= (1 << A::a); if (r != 0x55555555) __builtin_abort (); #pragma omp parallel for simd for (R::r = 0; r < 32; R::r += 2) f[r + 8] |= 8; for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0))) __builtin_abort (); #pragma omp simd collapse(2) for (T<Q>::t = 0; T<Q>::t < 7; T<Q>::t += 2) for (a = 0; A::a < 8; a++) f[((T<Q>::t << 2) | a) + 3] |= 16; if (T<Q>::t != 8 || A::a != 8) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? 16 : 0))) __builtin_abort (); T<Q>::t = 32; a = 16; #pragma omp parallel #pragma omp single #pragma omp taskloop simd collapse(2) for (T<Q>::t = 0; T<Q>::t < 7; T<Q>::t += 2) for (A::a = 0; a < 8; A::a++) f[((T<Q>::t << 2) | A::a) + 3] |= 32; if (T<Q>::t != 8 || a != 8) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0))) __builtin_abort (); #pragma omp parallel #pragma omp single #pragma omp taskloop simd for (R::r = 0; r < 31; R::r += 2) f[r + 8] |= 64; if (r != 32) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0))) __builtin_abort (); } int main () { A<int> a; a.m1 (); }<LOOP-END> <OMP-START>#pragma omp parallel for lastprivate (T<Q>::t)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-4.C
#pragma omp parallel for reduction(|:r)
100
2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0))) __builtin_abort (); r = 0; <LOOP-START>for (a = 0; A::a < 31; a += 2) r |= (1 << A::a); if (r != 0x55555555) __builtin_abort (); #pragma omp parallel for simd for (R::r = 0; r < 32; R::r += 2) f[r + 8] |= 8; for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0))) __builtin_abort (); #pragma omp simd collapse(2) for (T<Q>::t = 0; T<Q>::t < 7; T<Q>::t += 2) for (a = 0; A::a < 8; a++) f[((T<Q>::t << 2) | a) + 3] |= 16; if (T<Q>::t != 8 || A::a != 8) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? 16 : 0))) __builtin_abort (); T<Q>::t = 32; a = 16; #pragma omp parallel #pragma omp single #pragma omp taskloop simd collapse(2) for (T<Q>::t = 0; T<Q>::t < 7; T<Q>::t += 2) for (A::a = 0; a < 8; A::a++) f[((T<Q>::t << 2) | A::a) + 3] |= 32; if (T<Q>::t != 8 || a != 8) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0))) __builtin_abort (); #pragma omp parallel #pragma omp single #pragma omp taskloop simd for (R::r = 0; r < 31; R::r += 2) f[r + 8] |= 64; if (r != 32) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0))) __builtin_abort (); } int main () { A<int> a; a.m1 (); }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(|:r)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/member-4.C
#pragma omp parallel for simd
100
r (a = 0; A::a < 31; a += 2) r |= (1 << A::a); if (r != 0x55555555) __builtin_abort (); <LOOP-START>for (R::r = 0; r < 32; R::r += 2) f[r + 8] |= 8; for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0))) __builtin_abort (); #pragma omp simd collapse(2) for (T<Q>::t = 0; T<Q>::t < 7; T<Q>::t += 2) for (a = 0; A::a < 8; a++) f[((T<Q>::t << 2) | a) + 3] |= 16; if (T<Q>::t != 8 || A::a != 8) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? 16 : 0))) __builtin_abort (); T<Q>::t = 32; a = 16; #pragma omp parallel #pragma omp single #pragma omp taskloop simd collapse(2) for (T<Q>::t = 0; T<Q>::t < 7; T<Q>::t += 2) for (A::a = 0; a < 8; A::a++) f[((T<Q>::t << 2) | A::a) + 3] |= 32; if (T<Q>::t != 8 || a != 8) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0))) __builtin_abort (); #pragma omp parallel #pragma omp single #pragma omp taskloop simd for (R::r = 0; r < 31; R::r += 2) f[r + 8] |= 64; if (r != 32) __builtin_abort (); for (int i = 0; i < 64; i++) if (f[i] != (((i >= 8 && i < 32 + 8) ? ((i & 1) ? 1 : (64 | 8 | 1)) : 0) | ((i >= 2 && i < 32 + 2 && (i - 2) % 3 == 0) ? 2 : 0) | ((i >= 9 && i < 32 + 9) ? 4 : 0) | ((i >= 3 && i < 32 + 3) ? (16 | 32) : 0))) __builtin_abort (); } int main () { A<int> a; a.m1 (); }<LOOP-END> <OMP-START>#pragma omp parallel for simd<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-6.C
#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \
100
*y, B<long> (&w)[1][2]) { A<unsigned long long> a[9]; short bb[5] = {}; short (&b)[5] = bb; <LOOP-START>reduction(*:y[:3]) reduction(|:a[:4]) \ reduction(&:w[0:][:2]) reduction(maxb:b) for (int i = 0; i < 128; i++) { x[i / 64][i % 3][(i / 4) & 1].t += i; if ((i & 15) == 1) y[0].t *= 3; if ((i & 31) == 2) y[1].t *= 7; if ((i & 63) == 3) y[2].t *= 17; z[i / 32].t += (i & 3); if (i < 4) z[i].t += i; a[i / 32].t |= 1ULL << (i & 30); w[0][i & 1].t &= ~(1L << (i / 17 * 3)); if ((i % 79) > b[0]) b[0] = i % 79; if ((i % 13) > b[1]) b[1] = i % 13; if ((i % 23) > b[2]) b[2] = i % 23; if ((i % 85) > b[3]) b[3] = i % 85; if ((i % 192) > b[4]) b[4] = i % 192; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-6.C
#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \
100
), w(w3), z(), a(), b() {} __attribute__((noinline, noclone)) void foo (); }; void S::foo () { <LOOP-START>reduction(*:y[:3]) reduction(|:a[:4]) \ reduction(&:w[0:][:2]) reduction(maxb:b) for (int i = 0; i < 128; i++) { x[i / 64][i % 3][(i / 4) & 1].t += i; if ((i & 15) == 1) y[0].t *= 3; if ((i & 31) == 2) y[1].t *= 7; if ((i & 63) == 3) y[2].t *= 17; z[i / 32].t += (i & 3); if (i < 4) z[i].t += i; a[i / 32].t |= 1ULL << (i & 30); w[0][i & 1].t &= ~(1L << (i / 17 * 3)); if ((i % 79) > b[0]) b[0] = i % 79; if ((i % 13) > b[1]) b[1] = i % 13; if ((i % 23) > b[2]) b[2] = i % 23; if ((i % 85) > b[3]) b[3] = i % 85; if ((i % 192) > b[4]) b[4] = i % 192; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-4.C
#pragma omp parallel for reduction (min:f) reduction (max:j)
100
(void); template <typename I, typename F> void foo () { I j = -10000; F f = 1024.0; int i; <LOOP-START>for (i = 0; i < 4; i++) switch (i) { case 0: if (j < -16) j = -16; break; case 1: if (f > -2.0) f = -2.0; break; case 2: if (j < 8) j = 8; if (f > 9.0) f = 9.0; break; case 3: break; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction (min:f) reduction (max:j)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-4.C
#pragma omp parallel for reduction (min:f) reduction (max:j)
100
!= 8 || f != -2.0) abort (); } int main () { int j = -10000; float f = 1024.0; int i; <LOOP-START>for (i = 0; i < 4; i++) switch (i) { case 0: if (j < -16) j = -16; break; case 1: if (f > -2.0) f = -2.0; break; case 2: if (j < 8) j = 8; if (f > 9.0) f = 9.0; break; case 3: break; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction (min:f) reduction (max:j)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-2.C
#pragma omp parallel for firstprivate (f) reduction (+:n, m) \
100
72 || l != 1) abort (); } void f2 (J<int> x, J<int> y, J<int> z) { int f = 0, n = 0, m = 0; <LOOP-START>num_threads (8) schedule (static, 9) \ collapse (6 - 2) for (I<int> i = x.end () - 1; i >= x.begin (); --i) for (int l = -131; l >= -131; l--) for (I<int> j = y.end (); j > y.begin () - 1; j -= 1) { for (I<int> k = z.end () - 4; k >= z.begin () + 3; k--) if (omp_get_num_threads () == 8 && ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k) != (omp_get_thread_num () * 9 + f++))) n++; else m++; }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate (f) reduction (+:n, m) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-2.C
#pragma omp parallel for firstprivate (f) reduction (+:n, m) \
100
rt (); } template <typename T> void f4 (J<int> x, J<int> y, J<int> z) { int f = 0, n = 0, m = 0; <LOOP-START>num_threads (8) schedule (static, 9) \ collapse (5 - 2) for (I<int> i = x.end () - 1; i >= x.begin (); --i) { for (I<int> j = y.end (); j > y.begin () - 1; j -= 1) { for (I<int> k = z.end () - 4; k >= z.begin () + 3; k--) if (omp_get_num_threads () == 8 && ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k) != (omp_get_thread_num () * 9 + f++))) n++; else m++; } }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate (f) reduction (+:n, m) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-2.C
#pragma omp parallel for firstprivate (f) reduction (+:n, m) \
100
rt (); } template <typename T> void f6 (J<int> x, J<int> y, J<int> z) { int f = 0, n = 0, m = 0; <LOOP-START>num_threads (8) schedule (static, 9) \ collapse (5 - 2) for (I<int> i = x.end () - 1; i >= x.begin (); --i) { for (I<int> j = y.end (); j > y.begin () - 1; j -= 1) { for (I<int> k = z.end () - 4; k >= z.begin () + (T) 3; k--) if (omp_get_num_threads () == 8 && ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k) != (omp_get_thread_num () * 9 + f++))) n++; else m++; } }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate (f) reduction (+:n, m) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-2.C
#pragma omp parallel for firstprivate (f) reduction (+:n, m) \
100
abort (); } template <typename T> void f8 (J<T> x, J<T> y, J<T> z) { T f = 0, n = 0, m = 0; <LOOP-START>num_threads (8) schedule (static, 9) \ collapse (6 - 2) for (I<T> i = x.end () - 1; i >= x.begin (); --i) for (T l = 0; l < 1; l++) for (I<T> j = y.end (); j > y.begin () - 1; j -= 1) { for (I<T> k = z.end () - 4; k >= z.begin () + 3; k--) if (omp_get_num_threads () == 8 && ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k) != (omp_get_thread_num () * 9 + f++))) n++; else m++; }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate (f) reduction (+:n, m) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/collapse-2.C
#pragma omp parallel for firstprivate (f) reduction (+:n, m) \
100
; } template <typename S, typename T> void f10 (J<T> x, J<T> y, J<T> z) { T f = 0, n = 0, m = 0; <LOOP-START>num_threads (8) schedule (static, 9) \ collapse (6 - 2) for (S i = x.end () - 1; i >= x.begin (); --i) for (T l = 0; l < 1; l++) for (S j = y.end (); j > y.begin () - 1; j -= 1) { for (S k = z.end () - 4; k >= z.begin () + 3; k--) if (omp_get_num_threads () == 8 && ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k) != (omp_get_thread_num () * 9 + f++))) n++; else m++; }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate (f) reduction (+:n, m) \<OMP-END>
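The collapse-2.C rows above pass a constant expression as the collapse argument (collapse (6 - 2), collapse (5 - 2)) and combine it with schedule (static, 9) and a firstprivate trip counter. A reduced sketch of the clause itself; the loop sizes and chunk below are made up:

#include <cstdio>

int main ()
{
  int hit[4][8] = {};

  // The collapse argument only has to be a constant positive integer
  // expression; "2 + 1" collapses the three nested loops into a single
  // iteration space of 4 * 1 * 8 iterations, scheduled in chunks of 4.
  #pragma omp parallel for collapse (2 + 1) schedule (static, 4)
  for (int i = 0; i < 4; i++)
    for (int l = 0; l < 1; l++)      // degenerate middle loop, as in f2/f8
      for (int j = 0; j < 8; j++)
        hit[i][j]++;

  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 8; j++)
      if (hit[i][j] != 1)
        std::printf ("iteration (%d,%d) ran %d times\n", i, j, hit[i][j]);
  return 0;
}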
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for
100
d baz (int i) { if (i < 0 || i >= 2000) abort (); results[i]++; } void f1 (int x, int y) { <LOOP-START>for (int i = x; i <= y; i += 6) baz (i); } void f2 (int x, int y) { int i; #pragma omp parallel for private(i) for (i = x; i < y - 1; i = 1 - 6 + 7 + i) baz (i); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for private(i)
100
mp parallel for for (int i = x; i <= y; i += 6) baz (i); } void f2 (int x, int y) { int i; <LOOP-START>for (i = x; i < y - 1; i = 1 - 6 + 7 + i) baz (i); } template <typename T> void f3 (int x, int y) { #pragma omp parallel for for (int i = x; i <= y; i = i + 9 - 8) baz (i); }<LOOP-END> <OMP-START>#pragma omp parallel for private(i)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for
100
i = x; i < y - 1; i = 1 - 6 + 7 + i) baz (i); } template <typename T> void f3 (int x, int y) { <LOOP-START>for (int i = x; i <= y; i = i + 9 - 8) baz (i); } template <typename T> void f4 (int x, int y) { int i; #pragma omp parallel for lastprivate(i) for (i = x + 2000 - 64; i > y + 10; --i) baz (i); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for lastprivate(i)
100
= x; i <= y; i = i + 9 - 8) baz (i); } template <typename T> void f4 (int x, int y) { int i; <LOOP-START>for (i = x + 2000 - 64; i > y + 10; --i) baz (i); } void f5 (int x, int y) { #pragma omp parallel for for (int i = x + 2000 - 64; i > y + 10L; i -= 10L) baz (i); }<LOOP-END> <OMP-START>#pragma omp parallel for lastprivate(i)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for
100
lastprivate(i) for (i = x + 2000 - 64; i > y + 10; --i) baz (i); } void f5 (int x, int y) { <LOOP-START>for (int i = x + 2000 - 64; i > y + 10L; i -= 10L) baz (i); } template <int N> void f6 (int x, int y) { #pragma omp parallel for for (int i = x + 2000 - 64; i > y + 10L; i = i - 12 + 2L) baz (i + N); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for
100
i = x + 2000 - 64; i > y + 10L; i -= 10L) baz (i); } template <int N> void f6 (int x, int y) { <LOOP-START>for (int i = x + 2000 - 64; i > y + 10L; i = i - 12 + 2L) baz (i + N); } template <long N> void f7 (int i, int x, int y) { #pragma omp parallel for for (i = x - 10; i <= y + 10; i += N) baz (i); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for
100
i > y + 10L; i = i - 12 + 2L) baz (i + N); } template <long N> void f7 (int i, int x, int y) { <LOOP-START>for (i = x - 10; i <= y + 10; i += N) baz (i); } template <long N> void f8 (J<int> j) { int i; #pragma omp parallel for for (i = j.begin (); i <= j.end () + N; i += 2) baz (i); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for
100
r (i = x - 10; i <= y + 10; i += N) baz (i); } template <long N> void f8 (J<int> j) { int i; <LOOP-START>for (i = j.begin (); i <= j.end () + N; i += 2) baz (i); } template <typename T, long N> void f9 (T x, T y) { #pragma omp parallel for for (T i = x; i <= y; i = i + N) baz (i); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for
100
n (); i <= j.end () + N; i += 2) baz (i); } template <typename T, long N> void f9 (T x, T y) { <LOOP-START>for (T i = x; i <= y; i = i + N) baz (i); } template <typename T, long N> void f10 (T x, T y) { T i; #pragma omp parallel for for (i = x; i > y; i = i + N) baz (i); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for
100
= x; i <= y; i = i + N) baz (i); } template <typename T, long N> void f10 (T x, T y) { T i; <LOOP-START>for (i = x; i > y; i = i + N) baz (i); } template <typename T> void f11 (T x, long y) { #pragma omp parallel { #pragma omp for nowait for (T i = x; i <= y; i += 3L) baz (i); #pragma omp single baz (y + 3); } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/for-2.C
#pragma omp parallel for
100
(i); #pragma omp single baz (y + 3); } } template <typename T> void f12 (T x, T y) { T i; <LOOP-START>for (i = x; i > y; --i) baz (i); } #define check(expr) \ for (int i = 0; i < 2000; i++) \ if (expr) \ { \ if (results[i] != 1) \ abort (); \ results[i] = 0; \ }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
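The for-2.C rows probe the canonical loop form: increments spelled as compound expressions (i = 1 - 6 + 7 + i, i = i + 9 - 8), bounds of a wider type (y + 10L), template-dependent steps, and private or lastprivate iteration variables. A compact sketch of two of these accepted spellings, with illustrative bounds:

#include <cstdio>

int results[128];

void baz (int i)
{
  #pragma omp atomic
  results[i]++;
}

int main ()
{
  // The increment may be any loop-invariant integer expression written as
  // "var = expr + var" or "var = var + expr"; 1 - 6 + 7 is a net step of +2.
  #pragma omp parallel for
  for (int i = 0; i <= 20; i = 1 - 6 + 7 + i)
    baz (i);

  // The bound may have a wider type than the iteration variable, as in the
  // "i > y + 10L" loops of the test.
  #pragma omp parallel for
  for (int i = 100; i > 20L; i -= 10)
    baz (i);

  for (int i = 0; i < 128; i++)
    if (results[i])
      std::printf ("baz (%d) ran %d time(s)\n", i, results[i]);
  return 0;
}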
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/nested-1.C
#pragma omp parallel for num_threads(2) shared (i)
100
// { dg-do run } extern "C" void abort(void); #define N 1000 int foo() { int i = 0, j; <LOOP-START>for (j = 0; j < N; ++j) { #pragma omp parallel num_threads(1) shared (i) { #pragma omp atomic i++; } }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(2) shared (i)<OMP-END>
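The nested-1.C row opens an inner parallel region with num_threads(1) and an atomic update inside an outer parallel for. A self-contained sketch of that nesting; N and the thread counts mirror the row, the rest is illustrative:

#include <cstdio>

#define N 1000

int main ()
{
  int i = 0;

  // Two outer threads split the j loop; every iteration enters a one-thread
  // inner parallel region and bumps the shared counter atomically.
  #pragma omp parallel for num_threads (2) shared (i)
  for (int j = 0; j < N; ++j)
    {
      #pragma omp parallel num_threads (1) shared (i)
      {
        #pragma omp atomic
        i++;
      }
    }

  std::printf ("i = %d (expected %d)\n", i, N);
  return 0;
}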
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-9.C
#pragma omp parallel for reduction(+:x[0:N][:][0:N], z[:4]) \
100
o (int (*&x)[3][N], int *y, long (&w)[1][N]) { unsigned long long a[9] = {}; short b[5] = {}; <LOOP-START>reduction(*:y[:3]) reduction(|:a[:4]) \ reduction(&:w[0:][:N]) reduction(max:b) for (int i = 0; i < 128; i++) { x[i / 64][i % 3][(i / 4) & 1] += i; if ((i & 15) == 1) y[0] *= 3; if ((i & 31) == N) y[1] *= 7; if ((i & 63) == 3) y[N] *= 17; z[i / 32] += (i & 3); if (i < 4) z[i] += i; a[i / 32] |= 1ULL << (i & 30); w[0][i & 1] &= ~(1L << (i / 17 * 3)); if ((i % 79) > b[0]) b[0] = i % 79; if ((i % 13) > b[1]) b[1] = i % 13; if ((i % 23) > b[N]) b[N] = i % 23; if ((i % 85) > b[3]) b[3] = i % 85; if ((i % 192) > b[4]) b[4] = i % 192; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x[0:N][:][0:N], z[:4]) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-9.C
#pragma omp parallel for reduction(+:x[0:N][:][0:N], z[:4]) \
100
bb) {} __attribute__((noinline, noclone)) void foo (); }; template <int N> void S<N>::foo () { <LOOP-START>reduction(*:y[:3]) reduction(|:a[:4]) \ reduction(&:w[0:][:N]) reduction(max:b) for (int i = 0; i < 128; i++) { x[i / 64][i % 3][(i / 4) & 1] += i; if ((i & 15) == 1) y[0] *= 3; if ((i & 31) == N) y[1] *= 7; if ((i & 63) == 3) y[N] *= 17; z[i / 32] += (i & 3); if (i < 4) z[i] += i; a[i / 32] |= 1ULL << (i & 30); w[0][i & 1] &= ~(1L << (i / 17 * 3)); if ((i % 79) > b[0]) b[0] = i % 79; if ((i % 13) > b[1]) b[1] = i % 13; if ((i % 23) > b[N]) b[N] = i % 23; if ((i % 85) > b[3]) b[3] = i % 85; if ((i % 192) > b[4]) b[4] = i % 192; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x[0:N][:][0:N], z[:4]) \<OMP-END>
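In the reduction-9.C rows the reduced storage is reached through a pointer (int (*&x)[3][N], int *y), so the reduction clauses must spell out the section extents (x[0:N][:][0:N], y[:3]). A minimal sketch of reducing through a pointer parameter; the function name and sizes are made up:

#include <cstdio>

// For a pointer list item the outermost extent cannot be deduced from the
// type, so the reduction clause names an explicit array section.
void bump (int (*x)[3], int n)
{
  // x[0:2][:] covers 2 rows of 3 ints each, i.e. the whole matrix below.
  #pragma omp parallel for reduction (+ : x[0:2][:])
  for (int i = 0; i < n; i++)
    x[(i / 4) & 1][i % 3] += 1;
}

int main ()
{
  int data[2][3] = {};
  bump (data, 96);
  for (int r = 0; r < 2; r++)
    std::printf ("%d %d %d\n", data[r][0], data[r][1], data[r][2]);
  return 0;
}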
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/target-2.C
#pragma omp parallel for reduction(+:s)
100
, er[x:x]) \ map(to: fr[0:x], gr[0:x], hr[2 * x:x], ir[2 * x:x]) \ map(tofrom: s) <LOOP-START>for (j = 0; j < x; j++) s += br[j] * cr[j] + dr[x + j] + er[x + j] + fr[j] + gr[j] + hr[2 * x + j] + ir[2 * x + j]; return s; } int main () { double d[1024]; double ebuf[1024]; double *e = ebuf; fn1 (br, cr, 128); fn1 (d + 128, e + 128, 128); fn1 (fr, gr, 128); double h = fn2 (128, d, e); if (h != 20416.0) abort (); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:s)<OMP-END>
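The target-2.C row maps array sections to the device (map(to: fr[0:x], ...), map(tofrom: s)) and runs a reduction inside the offloaded region. A rough, self-contained sketch of that combination; the array names and sizes are illustrative, and the construct falls back to host execution when no offload device is configured:

#include <cstdio>

int main ()
{
  const int n = 128;
  double b[n], c[n], s = 0.0;
  for (int i = 0; i < n; i++)
    {
      b[i] = i * 0.5;
      c[i] = 2.0;
    }

  // Copy the input sections to the device, copy the scalar sum both ways,
  // and accumulate into it with a reduction inside the target region.
  #pragma omp target map (to: b[0:n], c[0:n]) map (tofrom: s)
  #pragma omp parallel for reduction (+ : s)
  for (int j = 0; j < n; j++)
    s += b[j] * c[j];

  std::printf ("s = %g\n", s);   // 0.5 * 2 * (0 + 1 + ... + 127) = 8128
  return 0;
}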
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-7.C
#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \
100
p7]; for (int i = 0; i < p7 + 4; i++) { if (i < p7) b[i] = -6; a[i] = 0; } <LOOP-START>reduction(*:y[:p4]) reduction(|:a[:p5]) \ reduction(&:w[0:p6 - 1][:p6]) reduction(max:b) for (int i = 0; i < 128; i++) { x[i / 64][i % 3][(i / 4) & 1] += i; if ((i & 15) == 1) y[0] *= 3; if ((i & 31) == 2) y[1] *= 7; if ((i & 63) == 3) y[2] *= 17; z[i / 32] += (i & 3); if (i < 4) z[i] += i; a[i / 32] |= 1ULL << (i & 30); w[0][i & 1] &= ~(1L << (i / 17 * 3)); if ((i % 79) > b[0]) b[0] = i % 79; if ((i % 13) > b[1]) b[1] = i % 13; if ((i % 23) > b[2]) b[2] = i % 23; if ((i % 85) > b[3]) b[3] = i % 85; if ((i % 192) > b[4]) b[4] = i % 192; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-7.C
#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \
100
nt, long, short); }; void S::foo (int p1, long p2, long p3, int p4, int p5, long p6, short p7) { <LOOP-START>reduction(*:y[:p4]) reduction(|:a[:p5]) \ reduction(&:w[0:p6 - 1][:p6]) reduction(max:b[0:p7]) for (int i = 0; i < 128; i++) { x[i / 64][i % 3][(i / 4) & 1] += i; if ((i & 15) == 1) y[0] *= 3; if ((i & 31) == 2) y[1] *= 7; if ((i & 63) == 3) y[2] *= 17; z[i / 32] += (i & 3); if (i < 4) z[i] += i; a[i / 32] |= 1ULL << (i & 30); w[0][i & 1] &= ~(1L << (i / 17 * 3)); if ((i % 79) > b[0]) b[0] = i % 79; if ((i % 13) > b[1]) b[1] = i % 13; if ((i % 23) > b[2]) b[2] = i % 23; if ((i % 85) > b[3]) b[3] = i % 85; if ((i % 192) > b[4]) b[4] = i % 192; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-8.C
#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \
100
a[p7 + 4]; short bb[p7]; short (&b)[p7] = bb; for (int i = 0; i < p7; i++) bb[i] = -6; <LOOP-START>reduction(*:y[:p4]) reduction(|:a[:p5]) \ reduction(&:w[0:p6 - 1][:p6]) reduction(maxb:b) for (int i = 0; i < 128; i++) { x[i / 64][i % 3][(i / 4) & 1].t += i; if ((i & 15) == 1) y[0].t *= 3; if ((i & 31) == 2) y[1].t *= 7; if ((i & 63) == 3) y[2].t *= 17; z[i / 32].t += (i & 3); if (i < 4) z[i].t += i; a[i / 32].t |= 1ULL << (i & 30); w[0][i & 1].t &= ~(1L << (i / 17 * 3)); if ((i % 79) > b[0]) b[0] = i % 79; if ((i % 13) > b[1]) b[1] = i % 13; if ((i % 23) > b[2]) b[2] = i % 23; if ((i % 85) > b[3]) b[3] = i % 85; if ((i % 192) > b[4]) b[4] = i % 192; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2], z[:p3]) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-8.C
#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2][0:2], z[:p3]) \
100
nt, long, short); }; void S::foo (int p1, long p2, long p3, int p4, int p5, long p6, short p7) { <LOOP-START>reduction(*:y[:p4]) reduction(|:a[:p5]) \ reduction(&:w[0:p6 - 1][:p6]) reduction(maxb:b) for (int i = 0; i < 128; i++) { x[i / 64][i % 3][(i / 4) & 1].t += i; if ((i & 15) == 1) y[0].t *= 3; if ((i & 31) == 2) y[1].t *= 7; if ((i & 63) == 3) y[2].t *= 17; z[i / 32].t += (i & 3); if (i < 4) z[i].t += i; a[i / 32].t |= 1ULL << (i & 30); w[0][i & 1].t &= ~(1L << (i / 17 * 3)); if ((i % 79) > b[0]) b[0] = i % 79; if ((i % 13) > b[1]) b[1] = i % 13; if ((i % 23) > b[2]) b[2] = i % 23; if ((i % 85) > b[3]) b[3] = i % 85; if ((i % 192) > b[4]) b[4] = i % 192; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2][0:2], z[:p3]) \<OMP-END>
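The reduction-7.C and reduction-8.C rows take the section bounds as run-time arguments (p1 through p7), so the reduced ranges are only fixed when the construct executes. A small sketch of a reduction whose section length comes from a parameter; the function and sizes are made up:

#include <cstdio>

// The length of the reduced section is a run-time value, in the spirit of
// the p1..p7 parameters in reduction-7.C and reduction-8.C.
void accumulate (int *z, long p, int n)
{
  #pragma omp parallel for reduction (+ : z[0:p])
  for (int i = 0; i < n; i++)
    z[i % p] += i;
}

int main ()
{
  int z[4] = {};
  accumulate (z, 4, 128);
  for (int k = 0; k < 4; k++)
    std::printf ("z[%d] = %d\n", k, z[k]);
  return 0;
}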
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-10.C
#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2 + N - 2], z[:p3]) \
100
a[p7 + 4]; short bb[p7]; short (&b)[p7] = bb; for (int i = 0; i < p7; i++) bb[i] = -6; <LOOP-START>reduction(*:y[:p4]) reduction(|:a[:p5 - N + 2]) \ reduction(&:w[0:p6 - 3 + N][:p6]) reduction(maxb:b) for (int i = 0; i < 128; i++) { x[i / 64][i % 3][(i / 4) & 1].t += i; if ((i & 15) == 1) y[0].t *= 3; if ((i & 31) == N) y[1].t *= 7; if ((i & 63) == 3) y[N].t *= 17; z[i / 32].t += (i & 3); if (i < 4) z[i].t += i; a[i / 32].t |= 1ULL << (i & 30); w[0][i & 1].t &= ~(1L << (i / 17 * 3)); if ((i % 79) > b[0]) b[0] = i % 79; if ((i % 13) > b[1]) b[1] = i % 13; if ((i % 23) > b[N]) b[N] = i % 23; if ((i % 85) > b[3]) b[3] = i % 85; if ((i % 192) > b[4]) b[4] = i % 192; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2 + N - 2], z[:p3]) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/reduction-10.C
#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2][0:N], z[:p3 + N - 2]) \
100
template <int N> void S<N>::foo (int p1, long p2, long p3, int p4, int p5, long p6, short p7) { <LOOP-START>reduction(*:y[:p4]) reduction(|:a[:p5]) \ reduction(&:w[0:p6 - 3 + N][:p6]) reduction(maxb:b) for (int i = 0; i < 128; i++) { x[i / 64][i % 3][(i / 4) & 1].t += i; if ((i & 15) == 1) y[0].t *= 3; if ((i & 31) == N) y[1].t *= 7; if ((i & 63) == 3) y[N].t *= 17; z[i / 32].t += (i & 3); if (i < 4) z[i].t += i; a[i / 32].t |= 1ULL << (i & 30); w[0][i & 1].t &= ~(1L << (i / 17 * 3)); if ((i % 79) > b[0]) b[0] = i % 79; if ((i % 13) > b[1]) b[1] = i % 13; if ((i % 23) > b[N]) b[N] = i % 23; if ((i % 85) > b[3]) b[3] = i % 85; if ((i % 192) > b[4]) b[4] = i % 192; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:x[0:p1 + 1][:p2][0:N], z[:p3 + N - 2]) \<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C
#pragma omp parallel for
100
de <string.h> int test1 () { short int buf[64], *p; int i; memset (buf, '\0', sizeof (buf)); <LOOP-START>for (p = &buf[10]; p < &buf[54]; p++) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[3]; p <= &buf[63]; p += 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[16]; p < &buf[51]; p = 4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; } int test2 () { int buf[64], *p; int i; memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[10]; p < &buf[54]; p++) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[3]; p <= &buf[63]; p += 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p < &buf[51]; p = 4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C
#pragma omp parallel for
100
i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); <LOOP-START>for (p = &buf[3]; p <= &buf[63]; p += 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[16]; p < &buf[51]; p = 4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; } int test2 () { int buf[64], *p; int i; memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[10]; p < &buf[54]; p++) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[3]; p <= &buf[63]; p += 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p < &buf[51]; p = 4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C
#pragma omp parallel for
100
++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); <LOOP-START>for (p = &buf[16]; p < &buf[51]; p = 4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; } int test2 () { int buf[64], *p; int i; memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[10]; p < &buf[54]; p++) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[3]; p <= &buf[63]; p += 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p < &buf[51]; p = 4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C
#pragma omp parallel for
100
[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); <LOOP-START>for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; } int test2 () { int buf[64], *p; int i; memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[10]; p < &buf[54]; p++) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[3]; p <= &buf[63]; p += 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p < &buf[51]; p = 4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C
#pragma omp parallel for
100
] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); <LOOP-START>for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; } int test2 () { int buf[64], *p; int i; memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[10]; p < &buf[54]; p++) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[3]; p <= &buf[63]; p += 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p < &buf[51]; p = 4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/HPDCS/ULMT-OpenMP-GCC/testsuite/libgomp.c++/loop-8.C
#pragma omp parallel for
100
i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); <LOOP-START>for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; } int test2 () { int buf[64], *p; int i; memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[10]; p < &buf[54]; p++) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[3]; p <= &buf[63]; p += 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p < &buf[51]; p = 4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[53]; p > &buf[9]; --p) *p = 5; for (i = 0; i < 64; i++) if (buf[i] != 5 * (i >= 10 && i < 54)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[63]; p >= &buf[3]; p -= 2) p[-2] = 6; for (i = 0; i < 64; i++) if (buf[i] != 6 * ((i & 1) && i <= 61)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[48]; p > &buf[15]; p = -4 + p) p[2] = 7; for (i = 0; i < 64; i++) if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53)) abort (); memset (buf, '\0', sizeof (buf)); #pragma omp parallel for schedule (static, 3) for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL) p[2] = -7; for (i = 0; i < 64; i++) if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42)) abort (); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
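Every loop-8.C row above iterates with a raw pointer as the loop variable and exercises increments written as p++, p += 2, p = 4 + p, p = p + 4ULL and their decrementing counterparts, with and without schedule (static, 3). A trimmed-down sketch of the first of those loops:

#include <cstdio>
#include <cstring>

int main ()
{
  short buf[64], *p;
  std::memset (buf, 0, sizeof (buf));

  // The iteration variable of an OpenMP loop may be a pointer; it is
  // predetermined private even though it is declared outside the loop.
  #pragma omp parallel for schedule (static, 3)
  for (p = &buf[10]; p < &buf[54]; p++)
    *p = 5;

  for (int i = 0; i < 64; i++)
    if (buf[i] != 5 * (i >= 10 && i < 54))
      std::printf ("unexpected value %d at index %d\n", buf[i], i);
  return 0;
}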