Dataset schema (one record per pragma):
filename: string, 78 to 241 chars
omp_pragma_line: string, 24 to 416 chars
context_chars: int64, always 100
text: string, 152 to 177k chars
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try12/pivot.c
#pragma omp parallel for num_threads(thread_count)
100
printf("c_n_m = %d, each_thread_works = %d\n", c_n_m(n, m), each_thread_works); <LOOP-START>for (int __thread__ = 0; __thread__ < thread_count; __thread__++) { struct timeval start1, end1; gettimeofday(&start1, NULL); // ********************************************************* int base_index = __thread__ * each_thread_works; int end_index = base_index + each_thread_works; if (end_index > c_n_m(n, m)) { end_index = c_n_m(n, m); } for (int i = base_index; i < end_index; i++) { for (int j = 0; j < m; j++) { // small_cache[__thread__][j] = object[i].values[j] * n; small_cache[__thread__ * m + j] = object[i].values[j]; } for (int __i__ = 0; __i__ < n; __i__++) { for (int __j__ = 0; __j__ < n; __j__++) { if (__i__ > __j__) { object[i].cost += calcOneChebyshevDistance( __i__, __j__, __thread__); } } } } gettimeofday(&end1, NULL); printf("thread %d finished, time = %lf ms\n", __thread__, (end1.tv_sec - start1.tv_sec) * 1000 + (end1.tv_usec - start1.tv_usec) / 1000.0); // ********************************************************* }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(thread_count)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try8/pivot.c
#pragma omp parallel for num_threads(thread_count)
100
nDistanceAndStoreInArray() {
    // when adding this pragma, the program can be really fast!
    //
<LOOP-START>
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            euclidean_distance[i * n + j] = get_distance(i, j);
        }
        // printf("calcEuclideanDistanceAndStoreInArray: %d\n", i);
    }
<LOOP-END>
<OMP-START>#pragma omp parallel for num_threads(thread_count)<OMP-END>
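Because the two loops in this record are perfectly nested with independent iterations, collapse(2) is a common variant worth knowing. A minimal self-contained sketch; the linearized n*n matrix matches the record, but get_distance here is a hypothetical stand-in, not the repository's function:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the record's get_distance(i, j). */
static double get_distance(int i, int j) { return (double)(i - j) * (i - j); }

int main(void) {
    int n = 512, thread_count = 4;
    double *euclidean_distance = malloc(sizeof(double) * n * n);
    /* collapse(2) merges both nests into one n*n iteration space,
     * which helps when n is small relative to the thread count. */
#pragma omp parallel for num_threads(thread_count) collapse(2)
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            euclidean_distance[i * n + j] = get_distance(i, j);
    printf("d[1][2] = %f\n", euclidean_distance[1 * n + 2]);
    free(euclidean_distance);
    return 0;
}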
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try8/pivot.c
#pragma omp parallel for num_threads(thread_count)
100
printf("c_n_m = %d, each_thread_works = %d\n", c_n_m(n, m), each_thread_works); <LOOP-START>for (int __thread__ = 0; __thread__ < thread_count; __thread__++) { struct timeval start1, end1; gettimeofday(&start1, NULL); // ********************************************************* // int chebyshev_matrix_set = 0; double **chebyshev_matrix = (double **)malloc(sizeof(double *) * n); for (int j = 0; j < n; j++) { chebyshev_matrix[j] = (double *)malloc(sizeof(double) * n); }; int base_index = __thread__ * each_thread_works; int end_index = base_index + each_thread_works; if (end_index > c_n_m(n, m)) { end_index = c_n_m(n, m); } for (int i = base_index; i < end_index; i++) { calcAllChebyshevDistanceAndStoreInArray(chebyshev_matrix, object[i].values); object[i].cost = add_all_entries_of_matrix(chebyshev_matrix); } for (int j = 0; j < n; j++) { free(chebyshev_matrix[j]); } free(chebyshev_matrix); gettimeofday(&end1, NULL); printf("thread %d finished, time = %lf ms\n", __thread__, (end1.tv_sec - start1.tv_sec) * 1000 + (end1.tv_usec - start1.tv_usec) / 1000.0); // ********************************************************* }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(thread_count)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try4/pivot.c
#pragma omp parallel for num_threads(thread_count)
100
nDistanceAndStoreInArray() {
    // when adding this pragma, the program can be really fast!
    //
<LOOP-START>
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            euclidean_distance[i * n + j] = get_distance(i, j);
        }
        // printf("calcEuclideanDistanceAndStoreInArray: %d\n", i);
    }
<LOOP-END>
<OMP-START>#pragma omp parallel for num_threads(thread_count)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/void-echo/SDU-Parallel-Lab/LAB/try4/pivot.c
#pragma omp parallel for num_threads(thread_count)
100
chebyshev_matrix;
// omp_lock_t writelock;
// omp_init_lock(&writelock);
//
<LOOP-START>
for (int i = 0; i < c_n_m(n, m); i++) {
    combination *com = next_combination(); // very quick
    if (com == NULL) {
        break;
    }
    chebyshev_matrix = (float **)malloc(sizeof(float *) * n);
    for (int j = 0; j < n; j++) {
        chebyshev_matrix[j] = (float *)malloc(sizeof(float) * n);
    }
    int *values = com->values;
    //
    struct timeval start, end;
    gettimeofday(&start, NULL);
    calcAllChebyshevDistanceAndStoreInArray(chebyshev_matrix, values); // very slow!!
    gettimeofday(&end, NULL);
    printf("calcAllChebyshevDistanceAndStoreInArray() time: %ld ms\n",
           ((end.tv_sec * 1000000 + end.tv_usec) -
            (start.tv_sec * 1000000 + start.tv_usec)) / 1000);
    float res = add_all_entries_of_matrix(chebyshev_matrix);
    // float res = 0;
    com->cost = res;
    // store the combination in object array
    // object had been fully allocated in the beginning.
    store_in_object(com);
    for (int j = 0; j < n; j++) {
        free(chebyshev_matrix[j]);
    }
    free(chebyshev_matrix);
    free(com->values);
    free(com);
    if (res_index % 1000 == 0) {
        printf("combination %d finished, i = %d \n\n", res_index, i);
    }
}
<LOOP-END>
<OMP-START>#pragma omp parallel for num_threads(thread_count)<OMP-END>
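Two caveats in this record: break is not allowed inside the body of an omp parallel for (the loop must run its full canonical iteration space), and next_combination() appears to be stateful, so calling it from multiple threads races. A common conforming restructure, sketched self-contained with a toy payload in place of the real combination struct: materialize the work list serially, then parallelize over the finished array.

#include <stdio.h>
#include <stdlib.h>

int main(void) {
    int total = 10000;
    double *cost = malloc(sizeof(double) * total);
    int *work = malloc(sizeof(int) * total);
    /* Serial stand-in for next_combination(): generate everything
     * up front so the parallel loop has no early exit and no shared
     * generator state. */
    for (int i = 0; i < total; i++) work[i] = i;
#pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < total; i++) {
        double s = 0.0;                  /* stand-in for the matrix sum */
        for (int k = 0; k <= work[i] % 100; k++) s += k;
        cost[i] = s;
    }
    printf("cost[42] = %f\n", cost[42]);
    free(work);
    free(cost);
    return 0;
}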
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Abhiramborige/Parallel_programs_C/sum_for_reduction.c
#pragma omp parallel for default(shared) private(i) reduction(+:sum)
100
;
double t1, t2;
for (int i = 0; i < MAX; i++) {
    array[i] = 1;
}
t1 = omp_get_wtime();
int i;
<LOOP-START>
for (i = 0; i < MAX; i++) {
    sum += array[i];
}
<LOOP-END>
<OMP-START>#pragma omp parallel for default(shared) private(i) reduction(+:sum)<OMP-END>
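For reference, reduction(+:sum) gives each thread a private zero-initialized partial sum and combines the partials into the shared sum after the loop; without it, the concurrent sum += would race. A self-contained version of the same pattern; MAX, the array declaration, and the final printout are assumptions filled in around the record's 100-character context window:

#include <omp.h>
#include <stdio.h>
#define MAX 1000000

int main(void) {
    static int array[MAX];
    long sum = 0;
    double t1, t2;
    for (int i = 0; i < MAX; i++) array[i] = 1;
    t1 = omp_get_wtime();
    int i;
    /* Each thread accumulates into its private copy of sum; OpenMP
     * adds the partials back into the shared sum at the loop's end. */
#pragma omp parallel for default(shared) private(i) reduction(+:sum)
    for (i = 0; i < MAX; i++) {
        sum += array[i];
    }
    t2 = omp_get_wtime();
    printf("sum = %ld in %f s\n", sum, t2 - t1);
    return 0;
}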
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Abhiramborige/Parallel_programs_C/private.c
#pragma omp parallel for firstprivate(x)
100
#include<stdio.h> #include<omp.h> int main(){ int x=44;int i; <LOOP-START>for(i=0; i<10; i++){ x=i; printf("Thread no. %d and x = %d\n", omp_get_thread_num(), x); }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(x)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/GuilloteauQ/omp-logs/examples/for_policies.c
#pragma omp parallel for schedule(static) reduction (+:s)
100
task_list* l = task_list_init();
int s = 0;
// A nice for in parallel with openMP
<LOOP-START>
for (int j = 0; j < N; j++) {
    // We create the structure to hold the ints
    struct data d = {j, &s};
    /* We log the task
     * We give it the info j which is the number that it is adding */
    log_task(&l, "Sum", j, omp_get_thread_num(), sum, (void*) &d);
}
<LOOP-END>
<OMP-START>#pragma omp parallel for schedule(static) reduction (+:s)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/GuilloteauQ/omp-logs/examples/for_policies.c
#pragma omp parallel for schedule(dynamic) reduction (+:s)
100
or_static.svg", 1); // And we free the list of tasks l = task_list_init(); s = 0; <LOOP-START>for (int j = 0; j < N; j++) { struct data d = {j, &s}; log_task(&l, "Sum", j, omp_get_thread_num(), sum, (void*) &d); }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic) reduction (+:s)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/GuilloteauQ/omp-logs/examples/for_policies.c
#pragma omp parallel for schedule(guided) reduction (+:s)
100
*) &d); } tasks_to_svg(l, "for_dynamic.svg", 1); l = task_list_init(); s = 0; <LOOP-START>for (int j = 0; j < N; j++) { struct data d = {j, &s}; log_task(&l, "Sum", j, omp_get_thread_num(), sum, (void*) &d); }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided) reduction (+:s)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abagali1/mandelbrot/parallel/mandelbrot_openmp.c
#pragma omp parallel for
100
(*colors)[X][3] = malloc(sizeof(uchar[Y][X][3]));
Color* palette = make_palette(MAX_ITER);
<LOOP-START>
for (int Py = 0; Py < Y; Py++) {
    for (int Px = 0; Px < X; Px++) {
        Color c = mandelbrot(Px, Py, palette);
        colors[Py][Px][0] = c.r;
        colors[Py][Px][1] = c.g;
        colors[Py][Px][2] = c.b;
    }
}
<LOOP-END>
<OMP-START>#pragma omp parallel for<OMP-END>
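Mandelbrot rows have very uneven cost: points inside the set run the full MAX_ITER while exterior points escape quickly, so the default static split can leave threads idle. A self-contained sketch of the row loop under schedule(dynamic); the escape-time kernel below is a toy stand-in, the repository's mandelbrot()/Color code is not reproduced here:

#include <stdio.h>
#define X 800
#define Y 600
#define MAX_ITER 1000

/* Toy escape-time kernel over an arbitrary window of the plane. */
static int escape_time(int px, int py) {
    double cr = (px - X / 2) * 3.0 / X, ci = (py - Y / 2) * 2.0 / Y;
    double zr = 0.0, zi = 0.0;
    int k = 0;
    while (k < MAX_ITER && zr * zr + zi * zi < 4.0) {
        double t = zr * zr - zi * zi + cr;
        zi = 2.0 * zr * zi + ci;
        zr = t;
        k++;
    }
    return k;
}

int main(void) {
    static int iters[Y][X];
    /* dynamic scheduling rebalances the expensive rows across threads */
#pragma omp parallel for schedule(dynamic)
    for (int Py = 0; Py < Y; Py++)
        for (int Px = 0; Px < X; Px++)
            iters[Py][Px] = escape_time(Px, Py);
    printf("center iters = %d\n", iters[Y / 2][X / 2]);
    return 0;
}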
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/abagali1/mandelbrot/parallel/mandelbrot_cuda.c
#pragma omp parallel for
100
(*colors)[X][3] = malloc(sizeof(uchar[Y][X][3]));
Color* palette = make_palette(MAX_ITER);
<LOOP-START>
for (int Py = 0; Py < Y; Py++) {
    for (int Px = 0; Px < X; Px++) {
        Color c = mandelbrot(Px, Py, palette);
        colors[Py][Px][0] = c.r;
        colors[Py][Px][1] = c.g;
        colors[Py][Px][2] = c.b;
    }
}
<LOOP-END>
<OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PAC-P2P/BPNN-Face-Recognition-For-Parallel/src/backprop.c
#pragma omp parallel for
100
NULL) {
    printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
    return (NULL);
}
<LOOP-START>
for (i = 0; i < m; i++) {
    new[i] = alloc_1d_dbl(n);
}
<LOOP-END>
<OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PAC-P2P/BPNN-Face-Recognition-For-Parallel/src/backprop.c
#pragma omp parallel for
100
(n); } return (new); }
void bpnn_randomize_weights(double **w, int m, int n)
{
    int i, j;
<LOOP-START>
    for (i = 0; i <= m; i++) {
        for (j = 0; j <= n; j++) {
            w[i][j] = dpn1();
        }
    }
<LOOP-END>
<OMP-START>#pragma omp parallel for<OMP-END>
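One hazard in this record: if dpn1() draws from the C library RNG, rand() keeps hidden shared state, so calling it from a parallel loop is a data race and the results are not reproducible. A race-free sketch using rand_r (POSIX) with one seed per thread; dpn1's exact output range is an assumption here, not taken from backprop.c:

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical re-entrant stand-in for dpn1(): uniform in [-1, 1]. */
static double dpn1_r(unsigned *seed) {
    return (double)rand_r(seed) / RAND_MAX * 2.0 - 1.0;
}

int main(void) {
    enum { M = 4, N = 8 };
    static double w[M + 1][N + 1];
#pragma omp parallel
    {
        /* Per-thread seed: no shared RNG state between threads. */
        unsigned seed = 12345u + (unsigned)omp_get_thread_num();
#pragma omp for
        for (int i = 0; i <= M; i++)
            for (int j = 0; j <= N; j++)
                w[i][j] = dpn1_r(&seed);
    }
    printf("w[0][0] = %f\n", w[0][0]);
    return 0;
}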
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PAC-P2P/BPNN-Face-Recognition-For-Parallel/src/backprop.c
#pragma omp parallel for
100
w[i][j] = dpn1(); } } }
void bpnn_zero_weights(double **w, int m, int n)
{
    int i, j;
<LOOP-START>
    for (i = 0; i <= m; i++) {
        for (j = 0; j <= n; j++) {
            w[i][j] = 0.0;
        }
    }
<LOOP-END>
<OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PAC-P2P/BPNN-Face-Recognition-For-Parallel/src/backprop.c
#pragma omp parallel for
100
e((char *) net->hidden_delta);
free((char *) net->output_delta);
free((char *) net->target);
<LOOP-START>
for (i = 0; i <= n1; i++) {
    free((char *) net->input_weights[i]);
    free((char *) net->input_prev_weights[i]);
}
<LOOP-END>
<OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/PAC-P2P/BPNN-Face-Recognition-For-Parallel/src/backprop.c
#pragma omp parallel for
100
_weights[i]); }
free((char *) net->input_weights);
free((char *) net->input_prev_weights);
<LOOP-START>
for (i = 0; i <= n2; i++) {
    free((char *) net->hidden_weights[i]);
    free((char *) net->hidden_prev_weights[i]);
}
<LOOP-END>
<OMP-START>#pragma omp parallel for<OMP-END>