| filename (string, 78–241 chars) | omp_pragma_line (string, 24–416 chars) | context_chars (int64, always 100) | text (string, 152–177k chars) |
|---|---|---|---|
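Each row pairs an OpenMP pragma with the loop it annotates. Inside the `text` field the loop is delimited by `<LOOP-START>`/`<LOOP-END>` and the pragma by `<OMP-START>`/`<OMP-END>`, preceded by `context_chars` (always 100) characters of surrounding source, which is why many snippets begin mid-token. A minimal C sketch for recovering the two spans from a `text` value follows; the `extract_span` helper and the POSIX `strndup` call are illustrative assumptions, not part of any dataset tooling.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy the substring between `open` and `close` out of `text`.
 * Returns a malloc'd string, or NULL if either marker is missing.
 * (Illustrative helper, not from the dataset's own tooling.) */
static char* extract_span(const char* text, const char* open, const char* close) {
    const char* start = strstr(text, open);
    if (!start)
        return NULL;
    start += strlen(open);
    const char* end = strstr(start, close);
    if (!end)
        return NULL;
    return strndup(start, (size_t)(end - start)); /* POSIX */
}

int main(void) {
    const char* text =
        "...context...<LOOP-START>for (int i = 0; i < N; i++) {}<LOOP-END> "
        "<OMP-START>#pragma omp parallel for<OMP-END>";
    char* loop = extract_span(text, "<LOOP-START>", "<LOOP-END>");
    char* omp  = extract_span(text, "<OMP-START>", "<OMP-END>");
    if (loop && omp)
        printf("pragma: %s\nloop:   %s\n", omp, loop);
    free(loop);
    free(omp);
    return 0;
}
```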
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
would have happened.
*/
DEBUG("finding the minimum of the accumulated blue edges\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* u = elem_at(&g->vertices, i);
payload* u_data = u->data;
/* only roots find the blue edge */
if (u_data->fragment_id != u->label)
continue;
edge* min_edge = NULL;
while (!is_ql_queue_empty(blues, u->label)) {
edge* b = dequeue(blues, u->label);
if (min_edge == NULL) {
min_edge = b;
continue;
}
/**
* So this might look like some kind of weird logic, but there's a
* reason why I'm doing this: say there are two different fragments
* with blue edges into each other. If the two blue edges are the
* same, it's perfectly fine -- it'll be resolved in the conflicting
* blue edges scenario. However, if they're different, they'll
* both be added to the MST when it should only be one of them. To
* prevent this, we'll simply use the edge that has a smaller value
* of (u*N + v). Note that both will have the exact same weight,
* so it's fine whichever one we choose.
*/
int b_score = b->u*g->N + b->v;
if (b->u > b->v)
b_score = b->v*g->N + b->u;
int min_score = min_edge->u*g->N + min_edge->v;
if (min_edge->u > min_edge->v)
min_score = min_edge->v*g->N + min_edge->u;
if ((b->w < min_edge->w) || (b->w == min_edge->w && b_score < min_score))
min_edge = b;
}
node* future_leader = elem_at(&g->vertices, min_edge->u);
payload* future_leader_data = future_leader->data;
u_data->b = min_edge;
future_leader_data->b = min_edge;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
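The tie-break comment in the row above canonicalizes the endpoint order so that mirrored copies of the same weight-tied blue edge score identically. A self-contained numeric check (N and the endpoints are made-up values standing in for `g->N` and the edge fields):

```c
#include <assert.h>

/* Same scoring rule as the snippet above: order the endpoints so that
 * (u, v) and (v, u) produce one canonical value. */
static int edge_score(int u, int v, int N) {
    return (u < v) ? u * N + v : v * N + u;
}

int main(void) {
    int N = 10;
    /* Two fragments holding mirrored copies of one blue edge agree on
     * the score (both 27), so only one copy enters the MST. */
    assert(edge_score(2, 7, N) == edge_score(7, 2, N));
    return 0;
}
```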
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
@g: the graph
*/
void assign_tmp_fragments(graph* g) {
DEBUG("setting tmp_fragment_id\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* u = elem_at(&g->vertices, i);
payload* u_data = u->data;
node* leader = elem_at(&g->vertices, u_data->fragment_id);
payload* leader_data = leader->data;
u_data->tmp_fragment_id = leader_data->b->u;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
data->tmp_fragment_id = leader_data->b->u;
}
DEBUG("setting temporary fragment_id\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* u = elem_at(&g->vertices, i);
payload* u_data = u->data;
u_data->fragment_id = u_data->tmp_fragment_id;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mst.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
r.
*/
for (int ok = 0; ok < 2; ok++) {
DEBUG("conflicts phase: %d\n", ok);
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* u = elem_at(&g->vertices, i);
payload* u_data = u->data;
if (u_data->fragment_id != u->label)
continue;
#pragma omp critical
{
node* v = elem_at(&g->vertices, u_data->b->v);
payload* v_data = v->data;
node* v_leader = elem_at(&g->vertices, v_data->fragment_id);
payload* v_leader_data = v_leader->data;
int conflicting_merges = (u->label == v_leader_data->b->v &&
v_leader_data->b->u == v->label &&
u_data->b->v == v->label);
if (conflicting_merges == ok) {
change_fragment(g, u->label, v_leader->label);
edge m = {u->label, v->label, g->adj_mat[u->label][v->label]};
enqueue(mst, 0, &m);
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mis.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
r each node.
*
* @g: a pointer to the graph object
*/
void generate_random_field(graph* g) {
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
if (!data->present)
continue;
data->r = randnum();
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mis.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
entering the MIS.
*
* @g: a pointer to the graph object
*/
void decide_mis_entry(graph* g) {
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
if (!data->present)
continue;
int enter = 1;
for (int i = 0; i < cur->degree; i++) {
node* neighbor = *((node**) elem_at(&cur->neighbors, i));
payload* neighbor_data = neighbor->data;
if (data->r > neighbor_data->r) {
enter = 0;
break;
}
}
if (enter) {
data->present = 0;
data->in_mis = 1;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mis.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
ve them.
*
* @g: a pointer to the graph object
*/
void remove_mis_adjacent_nodes(graph* g) {
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
if (data->in_mis) {
for (int i = 0; i < cur->degree; i++) {
node* neighbor = *((node**) elem_at(&cur->neighbors, i));
payload* neighbor_data = neighbor->data;
neighbor_data->present = 0;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/mis.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
nodes, returns 0 otherwise.
*/
int do_present_nodes_exist(graph* g) {
int keep_going = 0;
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
if (data->present)
keep_going = 1;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/byzantine.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
sors, int N) {
int ret = 0;
DEBUG("checking if there are any undecided processors\n");
<LOOP-START>for (int i = 0; i < N; i++) {
processor* p = processors+i;
if (p->decided == 0)
ret = 1;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/byzantine.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
st_vote(processor* processors, int N, queuelist* vote_ql) {
DEBUG("broadcasting votes\n");
<LOOP-START>for (int i = 0; i < N; i++) {
processor* p = processors+i;
for (int j = 0; j < N; j++) {
if (i == j)
continue;
vote v = {i, p->vote};
enqueue(vote_ql, j, &v);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/byzantine.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
a queuelist of votes
*/
void receive_votes(processor* processors, int N, queuelist* vote_ql) {
<LOOP-START>for (int i = 0; i < N; i++) {
processor* p = processors+i;
int yes = 0;
int no = 0;
while (!is_ql_queue_empty(vote_ql, i)) {
vote* v = dequeue(vote_ql, i);
if (v->vote)
yes++;
else
no++;
}
int maj = 1;
int tally = yes;
if (no > yes) {
maj = 0;
tally = no;
}
int threshold;
if (rand() % 2 == 0)
threshold = L;
else
threshold = H;
if (tally > threshold)
p->vote = maj;
else
p->vote = 0;
if (tally >= G) {
p->decided = 1;
p->d = maj;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/bellman_ford.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
messages, 0 otherwise.
*/
int messages_in_queue(int N, queuelist* recv) {
int result = 0;
<LOOP-START>for (int i = 0; i < N; i++) {
if (!is_ql_queue_empty(recv, i))
result = 1;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/bellman_ford.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
d(graph* g, queuelist* recv, queuelist* send) {
DEBUG("receiving and sending messages\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* u = elem_at(&g->vertices, i);
payload* u_data = u->data;
int lowest_y = INT_MAX;
int lowest_from = 0;
while (!is_ql_queue_empty(recv, i)) {
message* m = dequeue(recv, i);
if (lowest_y > m->y) {
lowest_y = m->y;
lowest_from = m->from;
}
}
if (lowest_y != INT_MAX && lowest_y < u_data->distance) {
u_data->distance = lowest_y;
for (int j = 0; j < u->degree; j++) {
node* v = *((node**) elem_at(&u->neighbors, j));
payload* u_data = u->data;
if (v->label == lowest_from)
continue;
message m = {u->label, lowest_y+1};
enqueue(send, v->label, &m);
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/bellman_ford.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
recv, queuelist* send) {
DEBUG("propagating messages from the send queuelist to recv\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* u = elem_at(&g->vertices, i);
while (!is_ql_queue_empty(send, u->label))
enqueue(recv, u->label, dequeue(send, u->label));
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/vertex_coloring.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
k and 0
* if none of the nodes need to be colored.
*/
int again(graph* g) {
int result = 0;
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* u = elem_at(&g->vertices, i);
payload* u_data = u->data;
if (u_data->again)
result = 1;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/vertex_coloring.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
ts children.
*
* @g: the graph
*/
void parent_to_child(graph* g) {
DEBUG("starting\n");
<LOOP-START>for(int i = 0; i < g->N; i++) {
node* u = elem_at(&g->vertices, i);
payload* u_data = u->data;
if (u->label != ROOT) {
node* parent = elem_at(&g->vertices, u_data->parent);
payload* parent_data = parent->data;
u_data->recv = parent_data->color;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/vertex_coloring.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
void six_color_tree(graph *g, int digits) {
DEBUG("starting\n");
parent_to_child(g);
<LOOP-START>for(int i = 0; i < g->N; i++) {
node* u = elem_at(&g->vertices, i);
payload* u_data = u->data;
if(u->label == ROOT)
continue;
u_data->again = 0;
int xor = u_data->recv ^ u_data->color;
for(int k = 0; k < digits; k++) {
int mask = 1 << k;
/* If they have this bit different, color */
if(xor & mask) {
u_data->color = (k << 1) + (u_data->color & mask ? 1 : 0);
break;
}
}
if(u_data->color >= 6)
u_data->again = 1;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
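The recoloring step in `six_color_tree` above keeps only the index of the lowest bit where a node's color differs from its parent's, plus the node's own value of that bit, shrinking the color space each round until at most six colors remain. A short run-through with made-up colors:

```c
#include <stdio.h>

int main(void) {
    int recv = 0x5, color = 0x6;        /* parent 0b101, child 0b110 */
    int xor = recv ^ color;             /* 0b011: bits 0 and 1 differ */
    for (int k = 0; k < 3; k++) {
        int mask = 1 << k;
        if (xor & mask) {               /* lowest differing bit is k = 0 */
            color = (k << 1) + (color & mask ? 1 : 0);
            break;
        }
    }
    printf("new color = %d\n", color);  /* (0 << 1) + 0 = 0 */
    return 0;
}
```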
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_dp.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
neighbor's x value.
*
* @g: a pointer to the graph
*/
void calculate_temporary_x(graph* g) {
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
processor* p = cur->data;
int new_x = p->x;
for (int j = 0; j < cur->degree; j++) {
node* neighbor = *((node**) elem_at(&cur->neighbors, j));
processor* neighbor_p = neighbor->data;
if (new_x < neighbor_p->x)
new_x = neighbor_p->x;
}
p->new_x = new_x;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_dp.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
if there was a change.
*/
int propagate_temporary_x(graph* g) {
int something_changed = 0;
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
processor* p = cur->data;
if (p->new_x != p->x)
something_changed = 1;
p->x = p->new_x;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
he graph nodes with the payload data.
*
* @g: the graph
*/
void initialize_graph(graph* g) {
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* data = malloc(sizeof(payload));
data->color = WHITE;
data->joined = 0;
initialize_vector(&data->W, sizeof(node*));
int* visited = malloc(g->N * sizeof(int));
memset(visited, 0, g->N * sizeof(int));
initialize_vector(&data->n2, sizeof(node*));
for (int j = 0; j < v->degree; j++) {
node* u = *((node**) elem_at(&v->neighbors, j));
if (visited[u->label])
continue;
visited[u->label] = 1;
append_to_vector(&data->n2, &u);
for (int k = 0; k < u->degree; k++) {
node* w = *((node**) elem_at(&u->neighbors, k));
if (w == v)
continue;
if (visited[w->label])
continue;
visited[w->label] = 1;
append_to_vector(&data->n2, &w);
}
}
append_to_vector(&data->n2, &v);
free(visited);
v->data = data;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
de (WHITE nodes), 0
* otherwise.
*/
int unjoined_nodes_exist(graph* g) {
int result = 0;
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* data = v->data;
if (data->color == WHITE) {
DEBUG("%d->color = WHITE\n", v->label);
result = 1;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
w for each vertex.
*
* @g: the graph
*/
void compute_w(graph* g) {
DEBUG("starting\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* v_data = v->data;
v_data->W.used = 0;
if (v_data->color == WHITE)
append_to_vector(&v_data->W, &v);
for (int j = 0; j < v->degree; j++) {
node* u = *((node**) elem_at(&v->neighbors, j));
payload* u_data = u->data;
if (u_data->color == WHITE)
append_to_vector(&v_data->W, &u);
}
v_data->w = v_data->W.used;
DEBUG("%d->w = %d\n", v->label, v_data->w);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
each vertex.
*
* @g: the graph
*/
void compute_w_tilde(graph* g) {
DEBUG("starting\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* v_data = v->data;
v_data->w_tilde = ceil_power_of_2(v_data->w);
DEBUG("%d->w_tilde = %d\n", v->label, v_data->w_tilde);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
r each vertex.
*
* @g: the graph
*/
void compute_w_hat(graph* g) {
DEBUG("starting\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* v_data = v->data;
if (v_data->W.used == 0)
continue;
int w_hat = 0;
for (int j = 0; j < v_data->n2.used; j++) {
node* u = *((node**) elem_at(&v_data->n2, j));
payload* u_data = u->data;
if (u_data->w_tilde > w_hat)
w_hat = u_data->w_tilde;
}
v_data->w_hat = w_hat;
DEBUG("%d->w_hat = %d\n", v->label, w_hat);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
ctive or not.
*
* @g: the graph
*/
void compute_active(graph* g) {
DEBUG("starting\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* v_data = v->data;
if (v_data->W.used == 0)
continue;
if (v_data->w_hat == v_data->w_tilde)
v_data->active = 1;
else
v_data->active = 0;
DEBUG("%d->active = %d\n", v->label, v_data->active);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
ort for each node.
*
* @g: the graph
*/
void compute_s(graph* g) {
DEBUG("starting\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* v_data = v->data;
if (v_data->W.used == 0)
continue;
int support = v_data->active;
for (int j = 0; j < v->degree; j++) {
node* u = *((node**) elem_at(&v->neighbors, j));
payload* u_data = u->data;
if (u_data->active)
support++;
}
DEBUG("%d->s = %d\n", v->label, support);
v_data->s = support;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
mized fashion.
*
* @g: the graph
*/
void compute_s_hat(graph* g) {
DEBUG("starting\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* v_data = v->data;
if (v_data->W.used == 0)
continue;
int s_hat = 0;
for (int j = 0; j < v_data->W.used; j++) {
node* u = *((node**) elem_at(&v_data->W, j));
payload* u_data = u->data;
if (u_data->s > s_hat)
s_hat = u_data->s;
}
DEBUG("%d->s_hat = %d\n", v->label, s_hat);
v_data->s_hat = s_hat;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
d fashion.
*
* @g: the graph
*/
void compute_candidacy(graph* g) {
DEBUG("starting\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* v_data = v->data;
if (v_data->W.used == 0)
continue;
v_data->candidate = 0;
if (v_data->active) {
int r = rand() % (v_data->s_hat);
if (r == 0)
v_data->candidate = 1;
}
DEBUG("%d->candidate = %d\n", v->label, v_data->candidate);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
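The candidacy draw above succeeds exactly when `rand() % s_hat` lands on the single residue 0, i.e. with probability 1/`s_hat`. An empirical sketch of that rate (the value of `s_hat` is arbitrary):

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    int s_hat = 4, hits = 0, trials = 100000;
    for (int t = 0; t < trials; t++)
        if (rand() % s_hat == 0)    /* same test as the snippet above */
            hits++;
    printf("P(candidate) ~ %.3f (expect ~0.250)\n", (double)hits / trials);
    return 0;
}
```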
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
* of each node).
*
* @g: the graph
*/
void compute_c(graph* g) {
DEBUG("starting\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* v_data = v->data;
if (v_data->W.used == 0)
continue;
v_data->c = 0;
for (int j = 0; j < v_data->W.used; j++) {
node* u = *((node**) elem_at(&v_data->W, j));
payload* u_data = u->data;
if (u_data->candidate)
v_data->c++;
}
DEBUG("%d->c = %d\n", v->label, v_data->c);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
es the w value.
*
* @g: the graph
*/
void compute_join(graph* g) {
DEBUG("starting\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* v_data = v->data;
if (v_data->W.used == 0)
continue;
int sigma_c = 0;
for (int j = 0; j < v_data->W.used; j++) {
node* u = *((node**) elem_at(&v_data->W, j));
payload* u_data = u->data;
sigma_c += u_data->c;
}
if (v_data->candidate && sigma_c <= 3*v_data->w) {
DEBUG("%d joining\n", v->label);
v_data->color = BLACK;
v_data->joined = 1;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/dominating_set.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
nating set or not).
*
* @g: the graph
*/
void colorize(graph* g) {
DEBUG("starting\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* v = elem_at(&g->vertices, i);
payload* v_data = v->data;
if (v_data->color != WHITE)
continue;
for (int j = 0; j < v->degree; j++) {
node* u = *((node**) elem_at(&v->neighbors, j));
payload* u_data = u->data;
if (u_data->color == BLACK) {
v_data->color = GRAY;
break;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/bfs_dijkstra.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
graph object
*/
void initialize_graph(graph* g) {
// allocate the data field for each node
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = malloc(sizeof(payload));
data->parent_label = -1;
data->phase_discovered = -1;
cur->data = data;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/bfs_dijkstra.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
e
* discovered.
*/
int broadcast_start(graph* g, int p) {
int nobody_was_discovered = 1;
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
// this node was just discovered in phase `p`
if (data->phase_discovered == p) {
// we send a "join p+1" message to all quiet neighbors
for (int j = 0; j < cur->degree; j++) {
node* neighbor = *((node**) elem_at(&cur->neighbors, j));
payload* neighbor_data = neighbor->data;
if (neighbor_data->phase_discovered < 0) {
neighbor_data->phase_discovered = p+1;
neighbor_data->parent_label = cur->label;
nobody_was_discovered = 0;
}
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
s `min_active` to its neighbors.
*/
DEBUG("broadcasting `min_active`s\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
if (data->committee == g->N+1)
data->min_active = data->default_leader;
else
data->min_active = g->N+1;
for (int j = 0; j < cur->degree; j++) {
node* neighbor = *((node**) elem_at(&cur->neighbors, j));
enqueue(active_ql, neighbor->label, &data->min_active);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
adcasted `min_active`s.
*/
DEBUG("receiving broadcasted transmissions\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
while(!is_ql_queue_empty(active_ql, i)) {
int* active = dequeue(active_ql, i);
data->min_active = min(data->min_active, *active);
}
data->leader = min(data->leader, data->min_active);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
* invite_ql) {
DEBUG("starting selection\n");
DEBUG("creating initial invitations\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
if (data->leader == data->default_leader) {
data->invite.x = i;
data->invite.y = data->min_active;
}
else {
data->invite.x = g->N+1;
data->invite.y = g->N+1;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
roadcast invitations to neighbors.
*/
DEBUG("broadcasting invitations\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
for (int j = 0; j < cur->degree; j++) {
node* neighbor = *((node**) elem_at(&cur->neighbors, j));
enqueue(invite_ql, neighbor->label, &data->invite);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
* smallest one.
*/
DEBUG("receiving broadcasted invitations\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
while (!is_ql_queue_empty(invite_ql, i)) {
invitation* invite = dequeue(invite_ql, i);
min_invitation(&data->invite, invite);
}
// make sure the invite is for us
if (data->invite.y == data->default_leader && data->invite.x == data->leader)
data->committee = data->leader;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/kcommittee.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
legalize_committees(graph* g) {
DEBUG("making sure there aren't any illegal committees\n");
<LOOP-START>for (int i = 0; i < g->N; i++) {
node* cur = elem_at(&g->vertices, i);
payload* data = cur->data;
if (data->committee >= g->N)
data->committee = i;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_hs.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
queuelist* send_ql) {
DEBUG("Generating 2*%d messages for %d processes\n", N, N);
<LOOP-START>for (int i = 0; i < N; i++) {
process* p = processes+i;
/**
* If this node has been asked (or decided) to not send out any more
* original messages (all nodes will always be up for propagating; but
* creating new messages is a privilege that nodes might lose), don't
* do anything.
*/
if (p->status == -1)
continue;
message to_right = {i, 1 << l, 1, 0, 0};
message to_left = {i, 1 << l, -1, 0, 0};
enqueue(send_ql, i, &to_right);
enqueue(send_ql, i, &to_left);
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
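Matching the initializers `{i, 1 << l, 1, 0, 0}` above against the field accesses in the next row suggests the following message layout; the struct definition itself is not shown in these rows, so the field order here is an inference, not the source's typedef.

```c
/* Inferred layout of `message` in leader_elect_hs.c (deduced from usage). */
typedef struct {
    int starter_label;     /* id of the originating process              */
    int hops_left;         /* 1 << l when the message is created         */
    int direction;         /* +1 = to the right, -1 = to the left        */
    int direction_changed; /* set once the message has turned around     */
    int stop_initiating;   /* tells the origin to stop creating messages */
} message;
```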
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_hs.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
queuelist* recv_ql) {
DEBUG("propagating messages on phase %d\n", l);
<LOOP-START>for (int i = 0; i < N; i++) {
DEBUG("i = %d\n", i);
process* p = processes+i;
while (!is_ql_queue_empty(send_ql, i)) {
message* m = dequeue(send_ql, i);
DEBUG("m->starter_label = %d\n", m->starter_label);
/**
* If the starter_label is the current node, then there are two
* possibilities:
* - this node has returned back home; increment status and be
* done with that message
* - this node never turned direction; this node is the winner
* so make this the leader
*
* Otherwise the message reached the far end. It's time to change
* direction, refresh the number of hops_left and go back.
*/
if (m->starter_label == i && m->hops_left != (1 << l)) {
if (m->stop_initiating)
p->status = -1;
else {
if (m->direction_changed)
p->status++;
else {
p->status = 3;
break;
}
}
continue;
}
if (m->hops_left == 0) {
DEBUG("zero hops left\n");
m->hops_left = 1 << l;
m->direction *= -1;
m->direction_changed = 1;
}
/**
* Make sure this message is good enough to propagate. A message
* passes through a node only if the origin is not lesser than
* the current node's label. A message that passes through a node
* in one direction _will_ pass through the same node when it's
* coming back.
*
* When a node passes a message along, it can no longer win.
* Therefore, it'll mark itself as status = -1, meaning that
* it'll no longer start messages.
*
* If a message is not passed through (m->starter_label < i) then
* the origin must be asked to not pass messages anymore.
*/
if (m->starter_label < i) {
/**
* Of the (1 << l) hops the message intended to complete, it
* has `hops_left` left, implying that it took
* `(1 << l) - hops_left` hops to get here. It'll take exactly
* the same number to go back to its origin.
*/
m->hops_left = (1 << l) - m->hops_left;
m->direction *= -1;
m->direction_changed = 1;
m->stop_initiating = 1;
continue;
}
else {
m->hops_left--;
p->status = -1;
}
int next_label = (N + i + m->direction) % N;
enqueue(recv_ql, next_label, m);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
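The turn-around arithmetic in the long comment above is plain distance accounting: a message that set out to make `1 << l` hops and still has `hops_left` remaining must have traveled `(1 << l) - hops_left` hops, and the return trip retraces exactly that many. A minimal numeric check (phase and hop counts chosen arbitrarily):

```c
#include <assert.h>

int main(void) {
    int l = 3;                           /* message intends 1 << 3 = 8 hops */
    int hops_left = 5;                   /* blocked after traveling 3       */
    int hops_back = (1 << l) - hops_left;
    assert(hops_back == 3);              /* return trip is exactly 3 hops   */
    return 0;
}
```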
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_hs.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
n will automatically set the number of elements to zero after
* the last element.
*/
<LOOP-START>for (int i = 0; i < N; i++) {
process* p = processes+i;
while (!is_ql_queue_empty(recv_ql, i)) {
enqueue(send_ql, i, dequeue(recv_ql, i));
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_lcr.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
cesses
* @N: number of processes
*/
void receive_leaders(process* processes, int N) {
<LOOP-START>for (int i = 0; i < N; i++) {
int next = (i+1) % N;
processes[next].received = processes[i].send;
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_lcr.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
sses
* @N: number of processes
*/
void determine_leaders(process* processes, int N) {
<LOOP-START>for (int i = 0; i < N; i++) {
if (processes[i].received > processes[i].leader) {
processes[i].send = processes[i].received;
processes[i].leader = processes[i].received;
}
else if (processes[i].received == processes[i].id) {
processes[i].leader = processes[i].id;
processes[i].status = 1;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/imsuite/src/leader_elect_lcr.c
|
#pragma omp parallel for schedule(SCHEDULING_METHOD)
| 100
|
he chosen leader.
*/
int identify_leader(process* processes, int N) {
int chosen_id = -1;
<LOOP-START>for (int i = 0; i < N; i++) {
if (processes[i].status == 1) {
chosen_id = i;
// this will happen at most once
}
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule(SCHEDULING_METHOD)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/tests/polybench-c-3.2/utilities/polybench.c
|
#pragma omp parallel for
| 100
|
double* flush = (double*) calloc (cs, sizeof(double));
int i;
double tmp = 0.0;
#ifdef _OPENMP
<LOOP-START>for (i = 0; i < cs; i++)
tmp += flush[i];
assert (tmp <= 10.0);
free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
void polybench_linux_fifo_scheduler()
{
/* Use FIFO scheduler to limit OS interference. Program must be run
as root, and this works only for Linux kernels. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
sched_setscheduler (0, SCHED_FIFO, &schedParam);
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c
|
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)
| 100
|
---------------------------------------------------------*/
norm_temp11 = 0.0;
norm_temp12 = 0.0;
<LOOP-START>for (j = 1; j <= lastcol-firstcol+1; j++) {
norm_temp11 = norm_temp11 + x[j]*z[j];
norm_temp12 = norm_temp12 + z[j]*z[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c
|
#pragma omp parallel for default(shared) private(j)
| 100
|
-
c Normalize z to obtain x
c-------------------------------------------------------------------*/
<LOOP-START>for (j = 1; j <= lastcol-firstcol+1; j++) {
x[j] = norm_temp12*z[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(j)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c
|
#pragma omp parallel for default(shared) private(i)
| 100
|
ing vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
<LOOP-START>for (i = 1; i <= NA+1; i++) {
x[i] = 1.0;
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c
|
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)
| 100
|
--------------------------------------------------------*/
norm_temp11 = 0.0;
norm_temp12 = 0.0;
<LOOP-START>for (j = 1; j <= lastcol-firstcol+1; j++) {
norm_temp11 = norm_temp11 + x[j]*z[j];
norm_temp12 = norm_temp12 + z[j]*z[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c
|
#pragma omp parallel for default(shared) private(j)
| 100
|
-
c Normalize z to obtain x
c-------------------------------------------------------------------*/
<LOOP-START>for (j = 1; j <= lastcol-firstcol+1; j++) {
x[j] = norm_temp12*z[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(j)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c
|
#pragma omp parallel for default(shared) private(i)
| 100
|
to mark nonzero positions
c---------------------------------------------------------------------*/
<LOOP-START>for (i = 1; i <= n; i++) {
colidx[n+i] = 0;
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c
|
#pragma omp parallel for default(shared) private(j)
| 100
|
umber of triples in each row
c-------------------------------------------------------------------*/
<LOOP-START>for (j = 1; j <= n; j++) {
rowstr[j] = 0;
mark[j] = FALSE;
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(j)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c
|
#pragma omp parallel for default(shared) private(k,j)
| 100
|
... preload data pages
c---------------------------------------------------------------------*/
<LOOP-START>for(j = 0;j <= nrows-1;j++) {
for(k = rowstr[j];k <= rowstr[j+1]-1;k++)
a[k] = 0.0;
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(k,j)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/CG/cg-orig.c
|
#pragma omp parallel for default(shared) private(i)
| 100
|
adding elements
c-------------------------------------------------------------------*/
nza = 0;
<LOOP-START>for (i = 1; i <= n; i++) {
x[i] = 0.0;
mark[i] = FALSE;
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/FT/ft-orig.c
|
#pragma omp parallel for default(shared) private(i,j,k)
| 100
|
ier space
c-------------------------------------------------------------------*/
int i, j, k;
<LOOP-START>for (k = 0; k < d[2]; k++) {
for (j = 0; j < d[1]; j++) {
for (i = 0; i < d[0]; i++) {
crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]);
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i,j,k) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/FT/ft-orig.c
|
#pragma omp parallel for default(shared) private(i,j,k,ii,ii2,jj,ij2,kk)
| 100
|
ck:
c mod(i-1+n/2, n) - n/2
c-------------------------------------------------------------------*/
<LOOP-START>for (i = 0; i < dims[2][0]; i++) {
ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;
ii2 = ii*ii;
for (j = 0; j < dims[2][1]; j++) {
jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;
ij2 = jj*jj+ii2;
for (k = 0; k < dims[2][2]; k++) {
kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;
indexmap[k][j][i] = kk*kk+ij2;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i,j,k,ii,ii2,jj,ij2,kk) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/SP/sp-orig.c
|
#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)
| 100
|
--------------------------------------------*/
int i, j, k;
double r1, r2, r3, r4, r5, t1, t2;
<LOOP-START>for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
r1 = rhs[0][i][j][k];
r2 = rhs[1][i][j][k];
r3 = rhs[2][i][j][k];
r4 = rhs[3][i][j][k];
r5 = rhs[4][i][j][k];
t1 = bt * r3;
t2 = 0.5 * ( r4 + r5 );
rhs[0][i][j][k] = -r2;
rhs[1][i][j][k] = r1;
rhs[2][i][j][k] = bt * ( r4 - r5 );
rhs[3][i][j][k] = -t1 + t2;
rhs[4][i][j][k] = t1 + t2;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/SP/sp-orig.c
|
#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)
| 100
|
-------------------------------------------*/
int i, j, k;
double r1, r2, r3, r4, r5, t1, t2;
<LOOP-START>for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
r1 = rhs[0][i][j][k];
r2 = rhs[1][i][j][k];
r3 = rhs[2][i][j][k];
r4 = rhs[3][i][j][k];
r5 = rhs[4][i][j][k];
t1 = bt * r1;
t2 = 0.5 * ( r4 + r5 );
rhs[0][i][j][k] = bt * ( r4 - r5 );
rhs[1][i][j][k] = -r3;
rhs[2][i][j][k] = r2;
rhs[3][i][j][k] = -t1 + t2;
rhs[4][i][j][k] = t1 + t2;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/EP/ep-orig.c
|
#pragma omp parallel for default(shared) private(i)
| 100
|
code.
*/
vranlc(0, &(dum[0]), dum[1], &(dum[2]));
dum[0] = randlc(&(dum[1]), dum[2]);
<LOOP-START>for (i = 0; i < 2*NK; i++) x[i] = -1.0e99;
Mops = log(sqrt(fabs(max(1.0, 1.0))));
timer_clear(1);
timer_clear(2);
timer_clear(3);
timer_start(1);
vranlc(0, &t1, A, x);
/* Compute AN = A ^ (2 * NK) (mod 2^46). */
t1 = A;
for ( i = 1; i <= MK+1; i++) {
t2 = randlc(&t1, t1);
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c
|
#pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2)
| 100
|
---------------------------------------------------*/
int i3, i2, i1;
double r1[M], r2[M];
<LOOP-START>for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 0; i1 < n1; i1++) {
r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]
+ r[i3-1][i2][i1] + r[i3+1][i2][i1];
r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]
+ r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
}
for (i1 = 1; i1 < n1-1; i1++) {
u[i3][i2][i1] = u[i3][i2][i1]
+ c[0] * r[i3][i2][i1]
+ c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]
+ r1[i1] )
+ c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );
/*--------------------------------------------------------------------
c Assume c(3) = 0 (Enable line below if c(3) not= 0)
c---------------------------------------------------------------------
c > + c(3) * ( r2(i1-1) + r2(i1+1) )
c-------------------------------------------------------------------*/
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c
|
#pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)
| 100
|
---------------------------------------------------*/
int i3, i2, i1;
double u1[M], u2[M];
<LOOP-START>for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 0; i1 < n1; i1++) {
u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]
+ u[i3-1][i2][i1] + u[i3+1][i2][i1];
u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]
+ u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];
}
for (i1 = 1; i1 < n1-1; i1++) {
r[i3][i2][i1] = v[i3][i2][i1]
- a[0] * u[i3][i2][i1]
/*--------------------------------------------------------------------
c Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0)
c---------------------------------------------------------------------
c > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)
c > + u1(i1) )
c-------------------------------------------------------------------*/
- a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )
- a[3] * ( u2[i1-1] + u2[i1+1] );
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c
|
#pragma omp parallel for default(shared) private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)
| 100
|
lse {
d2 = 1;
}
if (m3k == 3) {
d3 = 2;
} else {
d3 = 1;
}
<LOOP-START>for (j3 = 1; j3 < m3j-1; j3++) {
i3 = 2*j3-d3;
/*C i3 = 2*j3-1*/
for (j2 = 1; j2 < m2j-1; j2++) {
i2 = 2*j2-d2;
/*C i2 = 2*j2-1*/
for (j1 = 1; j1 < m1j; j1++) {
i1 = 2*j1-d1;
/*C i1 = 2*j1-1*/
x1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1]
+ r[i3][i2+1][i1] + r[i3+2][i2+1][i1];
y1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1]
+ r[i3][i2+2][i1] + r[i3+2][i2+2][i1];
}
for (j1 = 1; j1 < m1j-1; j1++) {
i1 = 2*j1-d1;
/*C i1 = 2*j1-1*/
y2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1]
+ r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1];
x2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1]
+ r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1];
s[j3][j2][j1] =
0.5 * r[i3+1][i2+1][i1+1]
+ 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)
+ 0.125 * ( x1[i1] + x1[i1+2] + y2)
+ 0.0625 * ( y1[i1] + y1[i1+2] );
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c
|
#pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)
| 100
|
parameter( m=535 )
*/
double z1[M], z2[M], z3[M];
if ( n1 != 3 && n2 != 3 && n3 != 3 ) {
<LOOP-START>for (i3 = 0; i3 < mm3-1; i3++) {
for (i2 = 0; i2 < mm2-1; i2++) {
for (i1 = 0; i1 < mm1; i1++) {
z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];
z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];
z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]
+z[i3][i2][i1];
u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]
+0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]);
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]
+0.5 * z1[i1];
u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]
+0.25*( z1[i1] + z1[i1+1] );
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]
+0.5 * z2[i1];
u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]
+0.25*( z2[i1] + z2[i1+1] );
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]
+0.25* z3[i1];
u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]
+0.125*( z3[i1] + z3[i1+1] );
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c
|
#pragma omp parallel for default(shared) private(i1,i2,i3,a) reduction(+:s) reduction(max:tmp)
| 100
|
--*/
double s = 0.0;
int i3, i2, i1, n;
double a = 0.0, tmp = 0.0;
n = nx*ny*nz;
<LOOP-START>for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 1; i1 < n1-1; i1++) {
s = s + r[i3][i2][i1] * r[i3][i2][i1];
a = fabs(r[i3][i2][i1]);
if (a > tmp) tmp = a;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for default(shared) private(i1,i2,i3,a) reduction(+:s) reduction(max:tmp)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c
|
#pragma omp parallel for private(i2, i1)
| 100
|
ber\n");
for (i = MM-1; i >= 0; i--) {
printf(" %4d", jg[0][i][1]);
}
printf("\n");*/
<LOOP-START>for (i3 = 0; i3 < n3; i3++) {
for (i2 = 0; i2 < n2; i2++) {
for (i1 = 0; i1 < n1; i1++) {
z[i3][i2][i1] = 0.0;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i2, i1) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/amannougrahiya/imop-compiler/runner/NPB3.0-omp-C-master/MG/mg.c
|
#pragma omp parallel for private(i1,i2,i3)
| 100
|
-------
c-------------------------------------------------------------------*/
int i1, i2, i3;
<LOOP-START>for (i3 = 0;i3 < n3; i3++) {
for (i2 = 0; i2 < n2; i2++) {
for (i1 = 0; i1 < n1; i1++) {
z[i3][i2][i1] = 0.0;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i1,i2,i3)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c
|
#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)
| 100
|
->inputs);
int h, j, k;
double *mw;
double sum;
if (!ann->hidden_layers) {
double *ret = o;
<LOOP-START>for (j = 0; j < ann->outputs; ++j) {
mw = w + ((ann->inputs + 1) * j);
sum = *mw++ * -1.0;
for (k = 0; k < ann->inputs; ++k) {
sum += *mw++ * i[k];
}
double output = genann_act_output(ann, sum);
*(o + j) = output;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c
|
#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)
| 100
|
genann_act_output(ann, sum);
*(o + j) = output;
}
return ret;
}
/* Figure input layer */
<LOOP-START>for (j = 0; j < ann->hidden; ++j) {
mw = w + ((ann->inputs + 1) * j);
sum = *mw++ * -1.0;
for (k = 0; k < ann->inputs; ++k) {
sum += *mw++ * i[k];
}
double output = genann_act_output(ann, sum);
*(o + j) = output;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c
|
#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)
| 100
|
o += ann->hidden;
/* Figure hidden layers, if any. */
for (h = 1; h < ann->hidden_layers; ++h) {
<LOOP-START>for (j = 0; j < ann->hidden; ++j) {
mw = w + ((ann->hidden + 1) * j);
sum = *mw++ * -1.0;
for (k = 0; k < ann->hidden; ++k) {
sum += *mw++ * i[k];
}
double output = genann_act_output(ann, sum);
*(o + j) = output;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c
|
#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)
| 100
|
);
i += ann->hidden;
o += ann->hidden;
}
double const *ret = o;
/* Figure output layer. */
<LOOP-START>for (j = 0; j < ann->outputs; ++j) {
mw = w + ((ann->hidden + 1) * j);
sum = *mw++ * -1.0;
for (k = 0; k < ann->hidden; ++k) {
sum += *mw++ * i[k];
}
double output = genann_act_output(ann, sum);
*(o + j) = output;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(k, j, mw, sum) firstprivate(o, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c
|
#pragma omp parallel for firstprivate(d, t, o) private(j)
| 100
|
/
if (genann_act_output == genann_act_linear ||
ann->activation_output == genann_act_linear) {
<LOOP-START>for (j = 0; j < ann->outputs; ++j) {
*(d + j) = *(t + j) - *(o + j);
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(d, t, o) private(j)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c
|
#pragma omp parallel for firstprivate(d, t, o) private(j)
| 100
|
te(j)
for (j = 0; j < ann->outputs; ++j) {
*(d + j) = *(t + j) - *(o + j);
}
}
else {
<LOOP-START>for (j = 0; j < ann->outputs; ++j) {
*(d + j) = (*(t + j) - *(o + j)) * *(o + j) * (1.0 - *(o + j));
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(d, t, o) private(j)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c
|
#pragma omp parallel for private(j, k) firstprivate(o, d)
| 100
|
st ww = ann->weight + ((ann->inputs + 1) * ann->hidden) + ((ann->hidden + 1) * ann->hidden * (h));
<LOOP-START>for (j = 0; j < ann->hidden; ++j) {
double delta = 0;
for (k = 0; k < (h == ann->hidden_layers - 1 ? ann->outputs : ann->hidden); ++k) {
const double forward_delta = dd[k];
const int windex = k * (ann->hidden + 1) + (j + 1);
const double forward_weight = ww[windex];
delta += forward_delta * forward_weight;
}
*(d + j) = *(o + j) * (1.0 - *(o + j)) * delta;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j, k) firstprivate(o, d)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c
|
#pragma omp parallel for private(j, k) firstprivate(d, w)
| 100
|
ann->inputs + (ann->hidden) * (ann->hidden_layers - 1))
: 0);
/* Set output layer weights. */
<LOOP-START>for (j = 0; j < ann->outputs; ++j) {
double *mw = w + ((ann->hidden_layers ? ann->hidden : ann->inputs) + 1) * j;
*mw++ += *(d + j) * learning_rate * -1.0;
for (k = 1; k < (ann->hidden_layers ? ann->hidden : ann->inputs) + 1; ++k) {
*mw++ += *(d + j) * learning_rate * i[k - 1];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j, k) firstprivate(d, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase1/genann_p.c
|
#pragma omp parallel for private(j, k) firstprivate(d, w)
| 100
|
+ (h
? ((ann->inputs + 1) * ann->hidden + (ann->hidden + 1) * (ann->hidden) * (h - 1))
: 0);
<LOOP-START>for (j = 0; j < ann->hidden; ++j) {
double *mw = w + ((h == 0 ? ann->inputs : ann->hidden) + 1) * j;
*mw++ += *(d + j) * learning_rate * -1.0;
for (k = 1; k < (h == 0 ? ann->inputs : ann->hidden) + 1; ++k) {
*mw++ += *(d + j) * learning_rate * i[k - 1];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j, k) firstprivate(d, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)
| 100
|
n->inputs);
int h, j, k;
double *mw;
double sum;
if (!ann->hidden_layers) {
double *ret = o;
<LOOP-START>for (j = 0; j < ann->outputs; ++j) {
mw = w + ((ann->inputs + 1) * j);
sum = *mw++ * -1.0;
for (k = 0; k < ann->inputs; ++k) {
sum += *mw++ * i[k];
}
double output = genann_act_output(ann, sum);
*(o + j) = output;
}<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)
| 100
|
genann_act_output(ann, sum);
*(o + j) = output;
}
return ret;
}
/* Figure input layer */
<LOOP-START>for (j = 0; j < ann->hidden; ++j) {
mw = w + ((ann->inputs + 1) * j);
sum = *mw++ * -1.0;
for (k = 0; k < ann->inputs; ++k) {
sum += *mw++ * i[k];
}
double output = genann_act_output(ann, sum);
*(o + j) = output;
}<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)
| 100
|
o += ann->hidden;
/* Figure hidden layers, if any. */
for (h = 1; h < ann->hidden_layers; ++h) {
<LOOP-START>for (j = 0; j < ann->hidden; ++j) {
mw = w + ((ann->hidden + 1) * j);
sum = *mw++ * -1.0;
for (k = 0; k < ann->hidden; ++k) {
sum += *mw++ * i[k];
}
double output = genann_act_output(ann, sum);
*(o + j) = output;
}<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)
| 100
|
);
i += ann->hidden;
o += ann->hidden;
}
double const *ret = o;
/* Figure output layer. */
<LOOP-START>for (j = 0; j < ann->outputs; ++j) {
mw = w + ((ann->hidden + 1) * j);
sum = *mw++ * -1.0;
for (k = 0; k < ann->hidden; ++k) {
sum += *mw++ * i[k];
}
double output = genann_act_output(ann, sum);
*(o + j) = output;
}<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(8), private(k, j, mw, sum) firstprivate(o, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for firstprivate(d, t, o) private(j)
| 100
|
/
if (genann_act_output == genann_act_linear ||
ann->activation_output == genann_act_linear) {
<LOOP-START>for (j = 0; j < ann->outputs; ++j) {
*(d + j) = *(t + j) - *(o + j);
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(d, t, o) private(j)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for firstprivate(d, t, o) private(j)
| 100
|
te(j)
for (j = 0; j < ann->outputs; ++j) {
*(d + j) = *(t + j) - *(o + j);
}
}
else {
<LOOP-START>for (j = 0; j < ann->outputs; ++j) {
*(d + j) = (*(t + j) - *(o + j)) * *(o + j) * (1.0 - *(o + j));
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(d, t, o) private(j)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for private(j, k) firstprivate(o, d)
| 100
|
st ww = ann->weight + ((ann->inputs + 1) * ann->hidden) + ((ann->hidden + 1) * ann->hidden * (h));
<LOOP-START>for (j = 0; j < ann->hidden; ++j) {
double delta = 0;
for (k = 0; k < (h == ann->hidden_layers - 1 ? ann->outputs : ann->hidden); ++k) {
const double forward_delta = dd[k];
const int windex = k * (ann->hidden + 1) + (j + 1);
const double forward_weight = ww[windex];
delta += forward_delta * forward_weight;
}
*(d + j) = *(o + j) * (1.0 - *(o + j)) * delta;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j, k) firstprivate(o, d)<OMP-END>
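Note on this row: the inner k loop only reads the downstream deltas (dd) and weights (ww), so sharing them is safe; each thread writes just its own d[j]. A self-contained sketch under the same bias-first layout (names are illustrative, not the genann API):

    /* Hidden-layer deltas: dd = downstream deltas, ww = downstream
     * weights in rows of width n_hid + 1 (bias first), o = this
     * layer's outputs, d = deltas to fill. */
    void hidden_deltas(int n_hid, int n_next, const double *dd,
                       const double *ww, const double *o, double *d) {
        int j, k;
        #pragma omp parallel for private(j, k)
        for (j = 0; j < n_hid; ++j) {
            double delta = 0.0;
            for (k = 0; k < n_next; ++k)
                delta += dd[k] * ww[k * (n_hid + 1) + (j + 1)]; /* +1 skips bias */
            d[j] = o[j] * (1.0 - o[j]) * delta; /* sigmoid derivative */
        }
    }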
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for private(j, k) firstprivate(d, w)
| 100
|
ann->inputs + (ann->hidden) * (ann->hidden_layers - 1))
: 0);
/* Set output layer weights. */
<LOOP-START>for (j = 0; j < ann->outputs; ++j) {
double *mw = w + ((ann->hidden_layers ? ann->hidden : ann->inputs) + 1) * j;
*mw++ += *(d + j) * learning_rate * -1.0;
for (k = 1; k < (ann->hidden_layers ? ann->hidden : ann->inputs) + 1; ++k) {
*mw++ += *(d + j) * learning_rate * i[k - 1];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j, k) firstprivate(d, w)<OMP-END>
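Note on this row: iteration j touches only its own (n_prev + 1)-wide row of the weight array, so the updates are race-free without atomics. A sketch with illustrative names (prev holds the previous layer's outputs, mirroring i above):

    void update_output_weights(int n_out, int n_prev, double learning_rate,
                               const double *d, const double *prev, double *w) {
        int j, k;
        #pragma omp parallel for private(j, k)
        for (j = 0; j < n_out; ++j) {
            double *mw = w + (n_prev + 1) * j;     /* this thread's row */
            *mw++ += d[j] * learning_rate * -1.0;  /* bias weight */
            for (k = 1; k < n_prev + 1; ++k)
                *mw++ += d[j] * learning_rate * prev[k - 1];
        }
    }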
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for private(j, k) firstprivate(d, w)
| 100
|
+ (h
? ((ann->inputs + 1) * ann->hidden + (ann->hidden + 1) * (ann->hidden) * (h - 1))
: 0);
<LOOP-START>for (j = 0; j < ann->hidden; ++j) {
double *mw = w + ((h == 0 ? ann->inputs : ann->hidden) + 1) * j;
*mw++ += *(d + j) * learning_rate * -1.0; /* d + j, not d + 1: each neuron j uses its own delta, matching the output-layer update above */
for (k = 1; k < (h == 0 ? ann->inputs : ann->hidden) + 1; ++k) {
*mw++ += *(d + j) * learning_rate * i[k - 1];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j, k) firstprivate(d, w)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for
| 100
|
size * 10;
}
genann *save = genann_copy(ann);
/* Take a random guess at the ANN weights. */
<LOOP-START>for (i = 0; i < ann->total_weights; ++i) {
ann->weight[i] += ((double)rand()) / RAND_MAX - 0.5;
}<LOOP-END> <OMP-START>#pragma omp parallel for <OMP-END>
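Note on this row: rand() keeps hidden global state, so calling it from a parallel for is a data race, and on libcs that lock internally it serializes the loop. A sketch of a race-free variant, assuming POSIX rand_r is available; weight/total stand in for ann->weight/ann->total_weights:

    #include <omp.h>
    #include <stdlib.h>

    void perturb_weights(double *weight, int total) {
        #pragma omp parallel
        {
            /* one generator state per thread */
            unsigned int seed = 1234u + (unsigned int)omp_get_thread_num();
            #pragma omp for
            for (int i = 0; i < total; ++i)
                weight[i] += (double)rand_r(&seed) / RAND_MAX - 0.5;
        }
    }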
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Pmoonesi/multicore-processing-fcnn/phase2/genann_p.c
|
#pragma omp parallel for reduction(+:err)
| 100
|
((double)rand()) / RAND_MAX - 0.5;
}
/* See how we did. */
err = 0;
int ind = 0, ans = 0;
<LOOP-START>for (i = 0; i < size; i++) {
const double *guess = genann_run(ann, inputs[i]);
err += genann_difference(guess, desired_outputs[i], 10);
}<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:err)<OMP-END>
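Note on this row: genann_run stores activations in buffers owned by the ann object, so running the same network from several threads at once races on that scratch space. A hedged workaround using genann's own genann_copy/genann_free (genann_difference and the data arrays mirror the snippet and are assumed to exist):

    double err = 0;
    #pragma omp parallel
    {
        genann *local = genann_copy(ann);   /* private network, private buffers */
        #pragma omp for reduction(+ : err)
        for (int i = 0; i < size; i++) {
            const double *guess = genann_run(local, inputs[i]);
            err += genann_difference(guess, desired_outputs[i], 10);
        }
        genann_free(local);
    }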
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/billDrett/GameOfLife-MPI-OpenMp-Cuda-/openMP/openmpMasterThreadCommunication/main.c
|
#pragma omp parallel for collapse(2)
| 100
|
neighbors
{
MPI_Start(&receiveReq[pos][i]);
}
//<LOOP-START>for(i = 2; i < nRows-2; i++) //calculate data inside the 2d array where we have all the data to find the new value of the cell
{
#pragma omp for
for(j = 2; j <nColumns-2; j++)
{
currentBlock[i][j] = updatedValue(prevBlock[i][j], activeNeighborsNoBound(prevBlock, i, j));
}
}<LOOP-END> <OMP-START>#pragma omp parallel for collapse(2)<OMP-END>
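Note on this row: collapse(2) requires two perfectly nested canonical loops with nothing between them, but the snippet pairs a commented-out outer loop with an inner #pragma omp for, which is a different (and conflicting) worksharing scheme. A minimal collapse(2) form of the same stencil update, reusing the project's helpers (updatedValue, activeNeighborsNoBound) as assumptions:

    #pragma omp parallel for collapse(2)
    for (int i = 2; i < nRows - 2; i++)
        for (int j = 2; j < nColumns - 2; j++)
            currentBlock[i][j] =
                updatedValue(prevBlock[i][j],
                             activeNeighborsNoBound(prevBlock, i, j));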
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/billDrett/GameOfLife-MPI-OpenMp-Cuda-/openMP/openmpParrallelCommunication/main.c
|
#pragma omp parallel for collapse(2)
| 100
|
neighbors
{
MPI_Start(&receiveReq[pos][i]);
}
//<LOOP-START>for(i = 2; i < nRows-2; i++) //calculate data inside the 2d array where we have all the data to find the new value of the cell
{
#pragma omp for
for(j = 2; j <nColumns-2; j++)
{
currentBlock[i][j] = updatedValue(prevBlock[i][j], activeNeighborsNoBound(prevBlock, i, j));
}
}<LOOP-END> <OMP-START>#pragma omp parallel for collapse(2)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/mperlet/matrix_multiplication/src/omp.c
|
#pragma omp parallel for
| 100
|
t_matrix->cols, sizeof(double));
// calculate the result matrix with omp (use pragma)
<LOOP-START>for (int i = 0; i < result_matrix->rows; i++) {
for (int j = 0; j < result_matrix->cols; j++) {
for (int k = 0; k < m_1->cols; k++) {
result_matrix->mat_data[i][j] += m_1->mat_data[i][k] * m_2->mat_data[k][j];
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
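Note on this row: parallelizing the outer loop gives each thread whole rows of the result, and the += accumulation is safe because the snippet appears to zero the result with calloc. A self-contained sketch on flat row-major arrays (the snippet itself uses a mat_data[][] structure):

    /* Row-parallel matrix multiply; a local accumulator replaces the
     * repeated read-modify-write of c[i][j]. */
    void matmul(int n, int m, int p,
                const double *a, const double *b, double *c) {
        #pragma omp parallel for
        for (int i = 0; i < n; i++)
            for (int j = 0; j < p; j++) {
                double acc = 0.0;
                for (int k = 0; k < m; k++)
                    acc += a[i * m + k] * b[k * p + j];
                c[i * p + j] = acc;
            }
    }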
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c
|
#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)
| 100
|
add_pd(
_mm256_add_pd(_mm256_add_pd(sum1, sum2), _mm256_add_pd(sum3, sum4)));
}
#else
<LOOP-START>for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)<OMP-END>
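Note on this row: reduction(+ : sum) gives every thread a zero-initialized private sum and combines them once at the end; the explicit shared(n, data1, data2) private(i) clauses merely restate the defaults. A minimal version:

    double dot(const double *data1, const double *data2, int n) {
        double sum = 0.0;
        int i;
        #pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)
        for (i = 0; i < n; i++)
            sum += data1[i] * data2[i];
        return sum;
    }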
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c
|
#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)
| 100
|
add_ps(
_mm256_add_ps(_mm256_add_ps(sum1, sum2), _mm256_add_ps(sum3, sum4)));
}
#else
<LOOP-START>for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c
|
#pragma omp parallel for
| 100
|
i;
#ifdef USE_AVX
#pragma omp parallel shared(n, data1, data2) private(i) reduction(+ : sum)
{
<LOOP-START>for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c
|
#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)
| 100
|
arallel for
for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}
}
#else
<LOOP-START>for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c
|
#pragma omp parallel for
| 100
|
i;
#ifdef USE_AVX
#pragma omp parallel shared(n, data1, data2) private(i) reduction(+ : sum)
{
<LOOP-START>for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c
|
#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)
| 100
|
arallel for
for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}
}
#else
<LOOP-START>for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c
|
#pragma omp parallel for
| 100
|
i;
#ifdef USE_AVX
#pragma omp parallel shared(n, data1, data2) private(i) reduction(+ : sum)
{
<LOOP-START>for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c
|
#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)
| 100
|
arallel for
for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}
}
#else
<LOOP-START>for (i = 0; i < n; i++) {
sum += data1[i] * data2[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(n, data1, data2) private(i) reduction(+ : sum)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c
|
#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)
| 100
|
)
{
#pragma omp for
for (i = 0; i < n; i++) {
sum += data[i];
}
}
#else
<LOOP-START>for (i = 0; i < n; i++) {
sum += data[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)<OMP-END>
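Note on this row: the #ifdef branch above puts reduction(+ : sum) on the enclosing parallel construct with a plain #pragma omp for inside, while the fallback fuses both into a single parallel for; the two placements are equivalent here. A sketch of the split form:

    double array_sum(const double *data, int n) {
        double sum = 0.0;
        int i;
        #pragma omp parallel shared(n, data) private(i) reduction(+ : sum)
        {
            #pragma omp for
            for (i = 0; i < n; i++)
                sum += data[i];
        }
        return sum;
    }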
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/vtsynergy/MetaMorph/metamorph-backends/openmp-backend/metamorph_openmp_mic.c
|
#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)
| 100
|
)
{
#pragma omp for
for (i = 0; i < n; i++) {
sum += data[i];
}
}
#else
<LOOP-START>for (i = 0; i < n; i++) {
sum += data[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(n, data) private(i) reduction(+ : sum)<OMP-END>
|