| filename (string, 78–241 chars) | omp_pragma_line (string, 24–416 chars) | context_chars (int64, always 100) | text (string, 152–177k chars) |
|---|---|---|---|
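Each row below flattens the four cells onto separate `|`-delimited lines: the source `filename`, the `omp_pragma_line`, the fixed `context_chars` value of 100, and the `text` field, in which the target loop is wrapped in `<LOOP-START>`/`<LOOP-END>` markers and the pragma in `<OMP-START>`/`<OMP-END>` markers, preceded by roughly 100 characters of file context. As a minimal sketch (assuming each `text` cell is available as a plain Python string; `split_row_text` is a hypothetical helper, not part of the dataset), the marker spans can be pulled apart like this:

```python
import re

# Marker names are taken verbatim from the rows below.
LOOP_RE = re.compile(r"<LOOP-START>(.*?)<LOOP-END>", re.DOTALL)
OMP_RE = re.compile(r"<OMP-START>(.*?)<OMP-END>", re.DOTALL)

def split_row_text(text: str):
    """Return (context, loop_body, pragma) from one `text` cell."""
    loop = LOOP_RE.search(text)
    omp = OMP_RE.search(text)
    # Everything before the loop marker is the ~100-char file context.
    context = text[:loop.start()] if loop else text
    return (context,
            loop.group(1) if loop else None,
            omp.group(1).strip() if omp else None)
```

Reattaching the extracted pragma on the line directly above the extracted loop recovers roughly the layout the pragma had in the original file named by `filename`.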
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr46032-2.c
|
#pragma omp parallel for
| 100
|
int
foo (void)
{
int a[N], b[N], c[N];
int *ap = &a[0];
int *bp = &b[0];
int *cp = &c[0];
<LOOP-START>for (unsigned int idx = 0; idx < N; idx++)
{
ap[idx] = 1;
bp[idx] = 2;
cp[idx] = ap[idx];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr27388-1.c
|
#pragma omp parallel for firstprivate (n)
| 100
|
e-omplower" } */
int n, o;
void
foo (void)
{
#pragma omp parallel firstprivate (n)
{
int i;
<LOOP-START>for (i = 0; i < 10; i++)
++n;
#pragma omp atomic
o += n;
}
}
/* { dg-final { scan-tree-dump-times "shared\\\(i\\\)" 0 "omplower" } }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate (n)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-24.c
|
#pragma omp parallel for schedule (nonmonotonic : dynamic, 4)
| 100
|
g-options "-O2 -fopenmp -fdump-tree-ssa" } */
extern void bar(int);
void foo (void)
{
int i;
<LOOP-START>for (i = 0; i < 37; ++i)
bar(i);
}
/* { dg-final { scan-tree-dump-times "GOMP_parallel_loop_nonmonotonic_dynamic" 1 "ssa" } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule (nonmonotonic : dynamic, 4)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr38676.c
|
#pragma omp parallel for shared(foo)
| 100
|
38676 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
int
main ()
{
int bar, foo = 1;
<LOOP-START>for (bar = 0; bar < 3; bar++)
{
switch (foo)
{
case 1:
break;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for shared(foo)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/sink-fold-2.c
|
#pragma omp parallel for ordered(2)
| 100
|
/* { dg-do compile } */
int i,j, N;
extern void bar();
void
funk ()
{
<LOOP-START>for (i=0; i < N; i += 3)
for (j=0; j < N; ++j)
{
#pragma omp ordered depend(sink:i-8,j-1) /* { dg-warning "refers to iteration never in the iteration space" } */
#pragma omp ordered depend(sink:i+3,j-1) /* { dg-warning "waiting for lexically later iteration" } */
bar();
#pragma omp ordered depend(source)
}<LOOP-END> <OMP-START>#pragma omp parallel for ordered(2)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr68128-1.c
|
#pragma omp parallel for private (i, j, k, l, a, b, c, s, e)
| 100
|
, float v, float w, float x, float y, float z, float t)
{
int i, j, k, l;
float a, *b, c, s, e;
<LOOP-START>for (j = 0; j < 1024; j++)
{
k = j * 64;
l = j * 64 + 63;
a = v + j * w;
b = u + j * 64;
for (i = k; i <= l; i++, b++, a += w)
{
c = a * a + y;
s = (1.f - c * x) * (1.f - c * x);
e = t * (1 / __builtin_sqrtf (c)) * s;
*b += (c < z ? e : 0);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private (i, j, k, l, a, b, c, s, e)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr27388-2.c
|
#pragma omp parallel for shared (i)
| 100
|
dg-options "-fopenmp -fdump-tree-omplower" } */
extern void baz (int);
void
foo (void)
{
int i;
<LOOP-START>for (i = 0; i < 2; i++)
baz (i);
}
void
bar (void)
{
int j = 0;
#pragma omp parallel shared (j)
{
j++;
#pragma omp for
for (j = 0; j < 2; j++)
baz (j);
}
}<LOOP-END> <OMP-START>#pragma omp parallel for shared (i)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/for-13.c
|
#pragma omp parallel for default(none)
| 100
|
}
// { dg-options "-fopenmp -fdump-tree-ompexp" }
extern void bar(int);
void foo(void)
{
int i;
<LOOP-START>for (i = 0; i < 10; i++)
bar(i);
}
// { dg-final { scan-tree-dump-times "omp_data_o" 0 "ompexp" } }<LOOP-END> <OMP-START>#pragma omp parallel for default(none)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr49640.c
|
#pragma omp parallel for private(ii,jj,kk)
| 100
|
Q, int R, int i, int j, int k,
unsigned char x[P][Q][R], int y[N][M][K])
{
int ii, jj, kk;
<LOOP-START>for (ii = 0; ii < P; ++ii)
for (jj = 0; jj < Q; ++jj)
for (kk = 0; kk < R; ++kk)
y[i + ii][j + jj][k + kk] = x[ii][jj][kk];
}
void
bar (int N, int M, int K, int P, int Q, int R, int i, int j, int k,
unsigned char x[P][Q][R], float y[N][M][K], float factor, float zero)
{
int ii, jj, kk;
#pragma omp parallel for private(ii,jj,kk)
for (ii = 0; ii < P; ++ii)
for (jj = 0; jj < Q; ++jj)
for (kk = 0; kk < R; ++kk)
y[i + ii][j + jj][k + kk] = factor * x[ii][jj][kk] + zero;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(ii,jj,kk)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/sharing-1.c
|
#pragma omp parallel for \
| 100
|
, j, s, l;
p = malloc (sizeof (int));
if (p == NULL)
return 0;
*p = 7;
s = 6;
l = 0;
<LOOP-START>default (none) private (p) shared (s)
for (i = 0; i < 64; i++)
{
int k = foo (0); /* Predetermined - private (automatic var declared */
k++; /* in scope of construct). */
thrglobalvar++; /* Predetermined - threadprivate. */
thrlocvar++; /* Predetermined - threadprivate. */
foo (i); /* Predetermined - private (omp for loop variable). */
foo (constvar); /* Predetermined - shared (const qualified type). */
foo (*p); /* *p predetermined - shared (heap allocated */
(*p)++; /* storage). */
bar (p); /* Explicitly determined - private. */
foo (s); /* Explicitly determined - shared. */
globalvar++; /* { dg-error "not specified in" } */
locvar++; /* { dg-error "not specified in" } */
l++; /* { dg-error "not specified in" } */
for (j = 0; j < 2; j++); /* { dg-error "not specified in" } */
}<LOOP-END> <OMP-START>#pragma omp parallel for \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/sink-fold-1.c
|
#pragma omp parallel for ordered(3)
| 100
|
" } */
/* Test depend(sink) clause folding. */
int i,j,k, N;
extern void bar();
void
funk ()
{
<LOOP-START>for (i=0; i < N; i++)
for (j=0; j < N; ++j)
for (k=0; k < N; ++k)
{
/* We remove the (sink:i,j-1,k) by virtue of it the i+0. The remaining
clauses get folded with a GCD of -2 for `i' and a maximum of -2, +2 for
'j' and 'k'. */
#pragma omp ordered \
depend(sink:i-8,j-2,k+2) \
depend(sink:i, j-1,k) \
depend(sink:i-4,j-3,k+6) \
depend(sink:i-6,j-4,k-6)
bar();
#pragma omp ordered depend(source)
}<LOOP-END> <OMP-START>#pragma omp parallel for ordered(3)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr27415.c
|
#pragma omp parallel for firstprivate (i)
| 100
|
ould not be firstprivate" } */
for (i = 0; i < 10; i++)
;
}
void
test2 (void)
{
int i = 0;
<LOOP-START>for (i = 0; i < 10; i++)
;
}
void
test3 (void)
{
int i = 0;
#pragma omp parallel
#pragma omp for reduction (+:i) /* { dg-error "should not be reduction" } */
for (i = 0; i < 10; i++)
;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate (i) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr27415.c
|
#pragma omp parallel for reduction (*:i)
| 100
|
"should not be reduction" } */
for (i = 0; i < 10; i++)
;
}
void
test4 (void)
{
int i = 0;
<LOOP-START>for (i = 0; i < 10; i++)
;
}
void
test5 (void)
{
int i = 0;
#pragma omp parallel firstprivate (i)
#pragma omp for
for (i = 0; i < 10; i++)
;
}<LOOP-END> <OMP-START>#pragma omp parallel for reduction (*:i) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/openmp-simd-2.c
|
#pragma omp parallel for simd num_threads(4) safelen(64)
| 100
|
!= s.s || u != s.s)
abort ();
return s.s;
}
void bar(int n, float *a, float *b)
{
int i;
<LOOP-START>for (i = 0; i < n ; i++)
a[i] = b[i];
}
/* { dg-final { scan-tree-dump-times "pragma omp simd reduction\\(u\\) reduction\\(t\\) reduction\\(\\+:s\\) aligned\\(a:32\\)" 1 "original" } }<LOOP-END> <OMP-START>#pragma omp parallel for simd num_threads(4) safelen(64)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/_Atomic-1.c
|
#pragma omp parallel for schedule (dynamic, b)
| 100
|
(i = 0; i < 16; i++)
;
#pragma omp taskloop grainsize (c)
for (i = 0; i < 16; i++)
;
<LOOP-START>for (i = 0; i < 16; i++)
;
j = 0;
#pragma omp simd linear(j:b)
for (i = 0; i < 16; i++)
j += b;
j = 4;
#pragma omp atomic read
b = j;
#pragma omp atomic write
j = c;
#pragma omp atomic
j += c;
#pragma omp atomic capture
b = j += c;
#pragma omp atomic capture
b = ++j;
#pragma omp atomic capture
{ b = j; j = c; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule (dynamic, b)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr30421.c
|
#pragma omp parallel for firstprivate(a) lastprivate(a)
| 100
|
*/
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp -Wall" } */
int
foo ()
{
int a = 0, i;
<LOOP-START>for (i = 0; i < 10; i++)
a += i;
return a;
}
int
bar ()
{
int a = 0, i;
#pragma omp parallel for firstprivate(a) lastprivate(a) schedule(static, 2)
for (i = 0; i < 10; i++)
a += i;
return a;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(a) lastprivate(a)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr30421.c
|
#pragma omp parallel for firstprivate(a) lastprivate(a) schedule(static, 2)
| 100
|
lastprivate(a)
for (i = 0; i < 10; i++)
a += i;
return a;
}
int
bar ()
{
int a = 0, i;
<LOOP-START>for (i = 0; i < 10; i++)
a += i;
return a;
}
int
baz ()
{
int a = 0, i;
#pragma omp parallel for firstprivate(a) lastprivate(a) schedule(dynamic)
for (i = 0; i < 10; i++)
a += i;
return a;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(a) lastprivate(a) schedule(static, 2)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/static-chunk-size-one.c
|
#pragma omp parallel for num_threads (3) reduction (+:a) schedule(static, 1)
| 100
|
{ dg-options "-fopenmp -O2 -fdump-tree-optimized -fno-tree-pre" } */
int
bar ()
{
int a = 0, i;
<LOOP-START>for (i = 0; i < 10; i++)
a += i;
return a;
}
/* Two phis for reduction, one in loop header, one in loop exit. One phi for iv
in loop header. */
/* { dg-final { scan-tree-dump-times "PHI" 3 "optimized" } }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads (3) reduction (+:a) schedule(static, 1)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr29965-6.c
|
#pragma omp parallel for schedule (static, 16)
| 100
|
p for schedule (static, 16)
for (i = 0; i < 2834; i++)
baz ();
}
void
foo2 (void)
{
int i;
<LOOP-START>for (i = 0; i < 2834; i++)
for (;;)
;
}
void
bar2 (void)
{
int i;
#pragma omp parallel for schedule (static, 16)
for (i = 0; i < 2834; i++)
baz ();
}<LOOP-END> <OMP-START>#pragma omp parallel for schedule (static, 16)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr68640.c
|
#pragma omp parallel for
| 100
|
enmp -fdump-tree-ealias-all" } */
#define N 1024
int
foo (int *__restrict__ ap)
{
int *bp = ap;
<LOOP-START>for (unsigned int idx = 0; idx < N; idx++)
ap[idx] = bp[idx];
}
/* { dg-final { scan-tree-dump-times "clique 1 base 1" 2 "ealias" } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr46032-3.c
|
#pragma omp parallel for
| 100
|
e N 2
int
foo (void)
{
int a[N], c[N];
int *ap = &a[0];
int *bp = &a[0];
int *cp = &c[0];
<LOOP-START>for (unsigned int idx = 0; idx < N; idx++)
{
ap[idx] = 1;
bp[idx] = 2;
cp[idx] = ap[idx];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/pr53992.c
|
#pragma omp parallel for
| 100
|
long data[10000];
long i, min=10000;
for (i=0; i<10000; i++) data[i] = -i;
<LOOP-START>for (i=0; i<10000; i++) {
__transaction_atomic
{
if (data[i] < min)
min = data[i];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/block-8.c
|
#pragma omp parallel for
| 100
|
// { dg-do compile }
// PR 24451
int foo()
{
int i;
<LOOP-START>for (i = 0; i < 10; ++i)
return 0; // { dg-error "invalid branch to/from OpenMP structured block" }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/appendix-a/a.31.1.c
|
#pragma omp parallel for private(i) shared(x, y, n) \
| 100
|
-do compile } */
void
a31_1 (float *x, int *y, int n)
{
int i, b;
float a;
a = 0.0;
b = 0;
<LOOP-START>reduction(+:a) reduction(^:b)
for (i = 0; i < n; i++)
{
a += x[i];
b ^= y[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) shared(x, y, n) \<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/appendix-a/a.27.1.c
|
#pragma omp parallel for private(a)
| 100
|
/* { dg-do compile } */
void
a27 ()
{
int i, a;
#pragma omp parallel private(a)
{
<LOOP-START>for (i = 0; i < 10; i++)
{
/* do work here */
}<LOOP-END> <OMP-START>#pragma omp parallel for private(a)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/gomp/appendix-a/a.26.2.c
|
#pragma omp parallel for private(a)
| 100
|
int k)
{
a = k; /* The global "a", not the private "a" in f */
}
void
f (int n)
{
int a = 0;
<LOOP-START>for (int i = 1; i < n; i++)
{
a = i;
g (a * 2); /* Private copy of "a" */
}<LOOP-END> <OMP-START>#pragma omp parallel for private(a)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/compilers/gcc/gcc.dg/vect/pr46032.c
|
#pragma omp parallel for
| 100
|
sults[nEvents];
unsigned pData[nEvents];
unsigned coeff = 2;
init (&results[0], &pData[0]);
<LOOP-START>for (int idx = 0; idx < (int)nEvents; idx++)
results[idx] = coeff * pData[idx];
check (&results[0]);
return 0;
}
/* { dg-final { scan-tree-dump-times "note: vectorized 1 loop" 1 "vect" { xfail { vect_no_align && { ! vect_hw_misalign } } } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_multi_interp.c
|
#pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) schedule(static)
| 100
|
ne_to_coarse = hypre_CTAlloc(int, n_fine);
n_coarse = 0;
n_SF = 0;
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i=0; i < n_fine; i++)
if (CF_marker[i] == 1) n_coarse++;
else if (CF_marker[i] == -3) n_SF++;
pass_array_size = n_fine-n_coarse-n_SF;
if (pass_array_size) pass_array = hypre_CTAlloc(int, pass_array_size);
pass_pointer = hypre_CTAlloc(int, max_num_passes+1);
if (n_fine) assigned = hypre_CTAlloc(int, n_fine);
{
P_diag_i = hypre_CTAlloc(int, n_fine+1);
P_offd_i = hypre_CTAlloc(int, n_fine+1);
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_multi_interp.c
|
#pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) schedule(static)
| 100
|
stroy(comm_handle);
}
}
n_coarse_offd = 0;
n_SF_offd = 0;
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i=0; i < num_cols_offd; i++)
if (CF_marker_offd[i] == 1) n_coarse_offd++;
else if (CF_marker_offd[i] == -3) n_SF_offd++;
if (num_cols_offd)
{
assigned_offd = hypre_CTAlloc(int, num_cols_offd);
map_S_to_new = hypre_CTAlloc(int, num_cols_offd);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd);
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, n_coarse_offd);
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_multi_interp.c
|
#pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) schedule(static)
| 100
|
ap_start[pass][0] = 0;
for (i=0; i < num_sends; i++)
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
P_ncols[j] = P_diag_i[j1+1] + P_offd_i[j1+1];
Pext_send_size += P_ncols[j];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_multi_interp.c
|
#pragma omp parallel for private(i,i1) schedule(static)
| 100
|
P_offd_i[i+1] += P_offd_i[i];
}
/* determine P for coarse points */
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i=0; i < n_coarse; i++)
{
i1 = C_array[i];
P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1];
P_diag_data[P_diag_i[i1]] = 1.0;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i,i1) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(j) schedule(static)
| 100
|
beta = gamma / gamma_old;
/* p = s + beta p */
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (j=0; j < local_size; j++)
{
p_data[j] = s_data[j] + beta*p_data[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(i,diag,scale) schedule(static)
| 100
|
/*d_0* = 1/theta * inv(M)r_0 - M is Jacobi*/
/* x_1 = x_0 + d_0 */
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows; i++)
{
diag = A_diag_data[A_diag_i[i]];
scale = temp1/diag;
dk[i] = scale*v_data[i];
u_data[i] += dk[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i,diag,scale) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(i,diag,scale) schedule(static)
| 100
|
2.0*sigma - p_k);
temp1 = p_kp1*p_k;
temp2 = 2.0*p_kp1/delta;
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows; i++)
{
diag = A_diag_data[A_diag_i[i]];
scale = temp2/diag;
dk[i] = temp1*dk[i] + scale*v_data[i];
u_data[i] += dk[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i,diag,scale) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(j) schedule(static)
| 100
|
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for ( j = 0; j < num_rows; j++ )
{
u_data[j] = mult * r_data[j] + v_data[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
u_data[j] = mult * r_data[j] + v_data[j];
}
}
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for ( i = 0; i < num_rows; i++ )
{
u_data[i] = orig_u[i] + u_data[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(j,diag) schedule(static)
| 100
|
s_data and get scaled residual: r = D^(-1/2)f -
* D^(-1/2)A*u */
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (j = 0; j < num_rows; j++)
{
diag = A_diag_data[A_diag_i[j]];
ds_data[j] = 1/sqrt(diag);
r_data[j] = ds_data[j] * f_data[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j,diag) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(j) schedule(static)
| 100
|
ta[j];
}
hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for ( j = 0; j < num_rows; j++ )
{
r_data[j] += ds_data[j] * tmp_data[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(j) schedule(static)
| 100
|
then start
the iteration by multiplying r by the cheby coef.*/
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for ( j = 0; j < num_rows; j++ )
{
orig_u[j] = u_data[j]; /* orig, unscaled u */
u_data[j] = r_data[j] * coefs[cheby_order];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(j) schedule(static)
| 100
|
y_order - 1; i >= 0; i-- )
{
/* v = D^(-1/2)AD^(-1/2)u */
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for ( j = 0; j < num_rows; j++ )
{
tmp_data[j] = ds_data[j] * u_data[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(j,tmp_d) schedule(static)
| 100
|
ec, 0.0, v);
/* u_new = coef*r + v*/
mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for ( j = 0; j < num_rows; j++ )
{
tmp_d = ds_data[j]* v_data[j];
u_data[j] = mult * r_data[j] + tmp_d;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j,tmp_d) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(j) schedule(static)
| 100
|
*/
/* now we have to scale u_data before adding it to u_orig*/
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for ( j = 0; j < num_rows; j++ )
{
u_data[j] = orig_u[j] + ds_data[j]*u_data[j];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(j) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(i,diag,scale) schedule(static)
| 100
|
0/upper_bound;
hypre_ParVectorAxpy(scale, Ztemp, v);
/* END NEW */
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows; i++)
{
diag = 1;
scale = temp1/diag;
dk[i] = scale*v_data[i];
u_data[i] += dk[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i,diag,scale) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/parcsr_ls/par_relax_more.c
|
#pragma omp parallel for private(i,diag,scale) schedule(static)
| 100
|
_bound;
hypre_ParVectorAxpy(scale, Ztemp, v);
/* END NEW */
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows; i++)
{
diag = 1;
scale = temp2/diag;
dk[i] = temp1*dk[i] + scale*v_data[i];
u_data[i] += dk[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i,diag,scale) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/vector.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
int ierr = 0;
size *=hypre_VectorNumVectors(v);
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < size; i++)
vector_data[i] = value;
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetRandomValues
*
* returns vector of values randomly distributed between -1.0 and +1.0
*--------------------------------------------------------------------------*/
int
hypre_SeqVectorSetRandomValues( hypre_Vector *v,
int seed )
{
double *vector_data = hypre_VectorData(v);
int size = hypre_VectorSize(v);
int i;
int ierr = 0;
hypre_SeedRand(seed);
size *=hypre_VectorNumVectors(v);
/* RDF: threading this loop may cause problems because of hypre_Rand() */
for (i = 0; i < size; i++)
vector_data[i] = 2.0 * hypre_Rand() - 1.0;
return ierr;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/vector.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
;
int ierr = 0;
size *=hypre_VectorNumVectors(x);
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < size; i++)
y_data[i] = x_data[i];
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneDeep
* Returns a complete copy of x - a deep copy, with its own copy of the data.
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
int size = hypre_VectorSize(x);
int num_vectors = hypre_VectorNumVectors(x);
hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );
hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);
hypre_SeqVectorInitialize(y);
hypre_SeqVectorCopy( x, y );
return y;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/vector.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
int ierr = 0;
size *=hypre_VectorNumVectors(y);
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < size; i++)
y_data[i] *= alpha;
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorAxpy
*--------------------------------------------------------------------------*/
int
hypre_SeqVectorAxpy( double alpha,
hypre_Vector *x,
hypre_Vector *y )
{
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int size = hypre_VectorSize(x);
int i;
int ierr = 0;
size *=hypre_VectorNumVectors(x);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < size; i++)
y_data[i] += alpha * x_data[i];
return ierr;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/vector.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
int ierr = 0;
size *=hypre_VectorNumVectors(x);
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < size; i++)
y_data[i] += alpha * x_data[i];
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInnerProd
*--------------------------------------------------------------------------*/
double hypre_SeqVectorInnerProd( hypre_Vector *x,
hypre_Vector *y )
{
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int size = hypre_VectorSize(x);
int i;
double result = 0.0;
size *=hypre_VectorNumVectors(x);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:result) schedule(static)
for (i = 0; i < size; i++)
result += y_data[i] * x_data[i];
return result;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/vector.c
|
#pragma omp parallel for private(i) reduction(+:result) schedule(static)
| 100
|
i;
double result = 0.0;
size *=hypre_VectorNumVectors(x);
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < size; i++)
result += y_data[i] * x_data[i];
return result;
}
/*--------------------------------------------------------------------------
* hypre_VectorSumElts:
* Returns the sum of all vector elements.
*--------------------------------------------------------------------------*/
double hypre_VectorSumElts( hypre_Vector *vector )
{
double sum = 0;
double * data = hypre_VectorData( vector );
int size = hypre_VectorSize( vector );
int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) schedule(static)
for ( i=0; i<size; ++i ) sum += data[i];
return sum;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) reduction(+:result) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= temp;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= temp;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
m_rows*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
if (num_rownnz < xpar*(num_rows))
{
/* use rownnz pointer to do the A*x multiplication when num_rownnz is smaller than num_rows */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,j,m,tempx) schedule(static)
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
/*
* for (jj = A_i[m]; jj < A_i[m+1]; jj++)
* {
* j = A_j[jj];
* y_data[m] += A_data[jj] * x_data[j];
* } */
if ( num_vectors==1 )
{
tempx = y_data[m];
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[A_j[jj]];
y_data[m] = tempx;
}
else
for ( j=0; j<num_vectors; ++j )
{
tempx = y_data[ j*vecstride_y + m*idxstride_y ];
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
y_data[ j*vecstride_y + m*idxstride_y] = tempx;
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static) <OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i,jj,j,m,tempx) schedule(static)
| 100
|
to do the A*x multiplication when num_rownnz is smaller than num_rows */
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
/*
* for (jj = A_i[m]; jj < A_i[m+1]; jj++)
* {
* j = A_j[jj];
* y_data[m] += A_data[jj] * x_data[j];
* } */
if ( num_vectors==1 )
{
tempx = y_data[m];
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[A_j[jj]];
y_data[m] = tempx;
}
else
for ( j=0; j<num_vectors; ++j )
{
tempx = y_data[ j*vecstride_y + m*idxstride_y ];
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
y_data[ j*vecstride_y + m*idxstride_y] = tempx;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i,jj,j,m,tempx) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i,jj,temp,j) schedule(static)
| 100
|
ride_y + m*idxstride_y] = tempx;
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
else
for ( j=0; j<num_vectors; ++j )
{
temp = y_data[ j*vecstride_y + i*idxstride_y ];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
temp += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
}
y_data[ j*vecstride_y + i*idxstride_y ] = temp;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i,jj,temp,j) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
--------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= alpha;
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* This version is using a different (more efficient) threading scheme
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvecT( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
int num_vectors = hypre_VectorNumVectors(x);
int idxstride_y = hypre_VectorIndexStride(y);
int vecstride_y = hypre_VectorVectorStride(y);
int idxstride_x = hypre_VectorIndexStride(x);
int vecstride_x = hypre_VectorVectorStride(x);
double temp;
double *y_data_expand = NULL;
int offset = 0;
#ifdef HYPRE_USING_OPENMP
int my_thread_num = 0;
int i, j, jv, jj;
int num_threads;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_rows != x_size)
ierr = 1;
if (num_cols != y_size)
ierr = 2;
if (num_rows != x_size && num_cols != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A^T*x
*-----------------------------------------------------------------*/
num_threads = hypre_NumThreads();
if (num_threads > 1)
{
y_data_expand = hypre_CTAlloc(double, num_threads*y_size);
if ( num_vectors==1 )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,j, my_thread_num, offset)
{
my_thread_num = omp_get_thread_num();
offset = y_size*my_thread_num;
#pragma omp for schedule(static)
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data_expand[offset + j] += A_data[jj] * x_data[i];
}
}
#ifdef HYPRE_USING_OPENMP
/* implied barrier */
#pragma omp for schedule(static)
for (i = 0; i < y_size; i++)
{
for (j = 0; j < num_threads; j++)
{
y_data[i] += y_data_expand[j*y_size + i];
/*y_data_expand[j*y_size + i] = 0; //zero out for next time */
}
}
#ifdef HYPRE_USING_OPENMP
} /* end parallel region */
hypre_TFree(y_data_expand);
}
else
{
/* MULTIPLE VECTORS NOT THREADED YET */
for (i = 0; i < num_rows; i++)
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
}
}
}
}
hypre_TFree(y_data_expand);
}
else
{
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[j] += A_data[jj] * x_data[i];
}
}
else
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
}
}
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= alpha;
}
return ierr;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
--------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
m_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A^T*x
*-----------------------------------------------------------------*/
num_threads = hypre_NumThreads();
if (num_threads > 1)
{
y_data_expand = hypre_CTAlloc(double, num_threads*y_size);
if ( num_vectors==1 )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,j, my_thread_num, offset)
{
my_thread_num = omp_get_thread_num();
offset = y_size*my_thread_num;
#pragma omp for schedule(static)
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data_expand[offset + j] += A_data[jj] * x_data[i];
}
}
#ifdef HYPRE_USING_OPENMP
/* implied barrier */
#pragma omp for schedule(static)
for (i = 0; i < y_size; i++)
{
for (j = 0; j < num_threads; j++)
{
y_data[i] += y_data_expand[j*y_size + i];
/*y_data_expand[j*y_size + i] = 0; //zero out for next time */
}
}
#ifdef HYPRE_USING_OPENMP
} /* end parallel region */
hypre_TFree(y_data_expand);
}
else
{
/* MULTIPLE VECTORS NOT THREADED YET */
for (i = 0; i < num_rows; i++)
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
}
}
}
}
hypre_TFree(y_data_expand);
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
--------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= alpha;
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvec_FF( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y,
int *CF_marker_x,
int *CF_marker_y,
int fpt )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
double temp;
int i, jj;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size)
ierr = 2;
if (num_cols != x_size && num_rows != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,temp) schedule(static)
for (i = 0; i < num_rows; i++)
{
if (CF_marker_x[i] == fpt)
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= alpha;
}
return ierr;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= temp;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= temp;
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i) schedule(static)
| 100
|
if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,temp) schedule(static)
for (i = 0; i < num_rows; i++)
{
if (CF_marker_x[i] == fpt)
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/AMG2013/seq_mv/csr_matvec.c
|
#pragma omp parallel for private(i,jj,temp) schedule(static)
| 100
|
#ifdef HYPRE_USING_OPENMP
<LOOP-START>for (i = 0; i < num_rows; i++)
{
if (CF_marker_x[i] == fpt)
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for private(i,jj,temp) schedule(static)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
Index_t numElem)
{
//
// pull in the stresses appropriate to the hydro integration
//
<LOOP-START>for (Index_t i = 0 ; i < numElem ; ++i){
sigxx[i] = sigyy[i] = sigzz[i] = - domain.p(i) - domain.q(i) ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
te<Real_t>(numElem8) ;
fz_elem = Allocate<Real_t>(numElem8) ;
}
// loop over all elements
<LOOP-START>for( Index_t k=0 ; k<numElem ; ++k )
{
const Index_t* const elemToNode = domain.nodelist(k);
Real_t B[3][8] ;// shape function derivatives
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
// get nodal coordinates from global arrays and copy into local arrays.
CollectDomainNodesToElemNodes(domain, elemToNode, x_local, y_local, z_local);
// Volume calculation involves extra work for numerical consistency
CalcElemShapeFunctionDerivatives(x_local, y_local, z_local,
B, &determ[k]);
CalcElemNodeNormals( B[0] , B[1], B[2],
x_local, y_local, z_local );
if (numthreads > 1) {
// Eliminate thread writing conflicts at the nodes by giving
// each element its own copy to write to
SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],
&fx_elem[k*8],
&fy_elem[k*8],
&fz_elem[k*8] ) ;
}
else {
SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],
fx_local, fy_local, fz_local ) ;
// copy nodal force contributions to global force arrray.
for( Index_t lnode=0 ; lnode<8 ; ++lnode ) {
Index_t gnode = elemToNode[lnode];
domain.fx(gnode) += fx_local[lnode];
domain.fy(gnode) += fy_local[lnode];
domain.fz(gnode) += fz_local[lnode];
}
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
we need to copy the data out of the temporary
// arrays used above into the final forces field
<LOOP-START>for( Index_t gnode=0 ; gnode<numNode ; ++gnode )
{
Index_t count = domain.nodeElemCount(gnode) ;
Index_t *cornerList = domain.nodeElemCornerList(gnode) ;
Real_t fx_tmp = Real_t(0.0) ;
Real_t fy_tmp = Real_t(0.0) ;
Real_t fz_tmp = Real_t(0.0) ;
for (Index_t i=0 ; i < count ; ++i) {
Index_t elem = cornerList[i] ;
fx_tmp += fx_elem[elem] ;
fy_tmp += fy_elem[elem] ;
fz_tmp += fz_elem[elem] ;
}
domain.fx(gnode) = fx_tmp ;
domain.fy(gnode) = fy_tmp ;
domain.fz(gnode) = fz_tmp ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem, hourg)
| 100
|
t(-1.);
/*************************************************/
/* compute the hourglass modes */
<LOOP-START>for(Index_t i2=0;i2<numElem;++i2){
Real_t *fx_local, *fy_local, *fz_local ;
Real_t hgfx[8], hgfy[8], hgfz[8] ;
Real_t coefficient;
Real_t hourgam[8][4];
Real_t xd1[8], yd1[8], zd1[8] ;
const Index_t *elemToNode = domain.nodelist(i2);
Index_t i3=8*i2;
Real_t volinv=Real_t(1.0)/determ[i2];
Real_t ss1, mass1, volume13 ;
for(Index_t i1=0;i1<4;++i1){
Real_t hourmodx =
x8n[i3] * gamma[i1][0] + x8n[i3+1] * gamma[i1][1] +
x8n[i3+2] * gamma[i1][2] + x8n[i3+3] * gamma[i1][3] +
x8n[i3+4] * gamma[i1][4] + x8n[i3+5] * gamma[i1][5] +
x8n[i3+6] * gamma[i1][6] + x8n[i3+7] * gamma[i1][7];
Real_t hourmody =
y8n[i3] * gamma[i1][0] + y8n[i3+1] * gamma[i1][1] +
y8n[i3+2] * gamma[i1][2] + y8n[i3+3] * gamma[i1][3] +
y8n[i3+4] * gamma[i1][4] + y8n[i3+5] * gamma[i1][5] +
y8n[i3+6] * gamma[i1][6] + y8n[i3+7] * gamma[i1][7];
Real_t hourmodz =
z8n[i3] * gamma[i1][0] + z8n[i3+1] * gamma[i1][1] +
z8n[i3+2] * gamma[i1][2] + z8n[i3+3] * gamma[i1][3] +
z8n[i3+4] * gamma[i1][4] + z8n[i3+5] * gamma[i1][5] +
z8n[i3+6] * gamma[i1][6] + z8n[i3+7] * gamma[i1][7];
hourgam[0][i1] = gamma[i1][0] - volinv*(dvdx[i3 ] * hourmodx +
dvdy[i3 ] * hourmody +
dvdz[i3 ] * hourmodz );
hourgam[1][i1] = gamma[i1][1] - volinv*(dvdx[i3+1] * hourmodx +
dvdy[i3+1] * hourmody +
dvdz[i3+1] * hourmodz );
hourgam[2][i1] = gamma[i1][2] - volinv*(dvdx[i3+2] * hourmodx +
dvdy[i3+2] * hourmody +
dvdz[i3+2] * hourmodz );
hourgam[3][i1] = gamma[i1][3] - volinv*(dvdx[i3+3] * hourmodx +
dvdy[i3+3] * hourmody +
dvdz[i3+3] * hourmodz );
hourgam[4][i1] = gamma[i1][4] - volinv*(dvdx[i3+4] * hourmodx +
dvdy[i3+4] * hourmody +
dvdz[i3+4] * hourmodz );
hourgam[5][i1] = gamma[i1][5] - volinv*(dvdx[i3+5] * hourmodx +
dvdy[i3+5] * hourmody +
dvdz[i3+5] * hourmodz );
hourgam[6][i1] = gamma[i1][6] - volinv*(dvdx[i3+6] * hourmodx +
dvdy[i3+6] * hourmody +
dvdz[i3+6] * hourmodz );
hourgam[7][i1] = gamma[i1][7] - volinv*(dvdx[i3+7] * hourmodx +
dvdy[i3+7] * hourmody +
dvdz[i3+7] * hourmodz );
}
/* compute forces */
/* store forces into h arrays (force arrays) */
ss1=domain.ss(i2);
mass1=domain.elemMass(i2);
volume13=CBRT(determ[i2]);
Index_t n0si2 = elemToNode[0];
Index_t n1si2 = elemToNode[1];
Index_t n2si2 = elemToNode[2];
Index_t n3si2 = elemToNode[3];
Index_t n4si2 = elemToNode[4];
Index_t n5si2 = elemToNode[5];
Index_t n6si2 = elemToNode[6];
Index_t n7si2 = elemToNode[7];
xd1[0] = domain.xd(n0si2);
xd1[1] = domain.xd(n1si2);
xd1[2] = domain.xd(n2si2);
xd1[3] = domain.xd(n3si2);
xd1[4] = domain.xd(n4si2);
xd1[5] = domain.xd(n5si2);
xd1[6] = domain.xd(n6si2);
xd1[7] = domain.xd(n7si2);
yd1[0] = domain.yd(n0si2);
yd1[1] = domain.yd(n1si2);
yd1[2] = domain.yd(n2si2);
yd1[3] = domain.yd(n3si2);
yd1[4] = domain.yd(n4si2);
yd1[5] = domain.yd(n5si2);
yd1[6] = domain.yd(n6si2);
yd1[7] = domain.yd(n7si2);
zd1[0] = domain.zd(n0si2);
zd1[1] = domain.zd(n1si2);
zd1[2] = domain.zd(n2si2);
zd1[3] = domain.zd(n3si2);
zd1[4] = domain.zd(n4si2);
zd1[5] = domain.zd(n5si2);
zd1[6] = domain.zd(n6si2);
zd1[7] = domain.zd(n7si2);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
CalcElemFBHourglassForce(xd1,yd1,zd1,
hourgam,
coefficient, hgfx, hgfy, hgfz);
// With the threaded version, we write into local arrays per elem
// so we don't have to worry about race conditions
if (numthreads > 1) {
fx_local = &fx_elem[i3] ;
fx_local[0] = hgfx[0];
fx_local[1] = hgfx[1];
fx_local[2] = hgfx[2];
fx_local[3] = hgfx[3];
fx_local[4] = hgfx[4];
fx_local[5] = hgfx[5];
fx_local[6] = hgfx[6];
fx_local[7] = hgfx[7];
fy_local = &fy_elem[i3] ;
fy_local[0] = hgfy[0];
fy_local[1] = hgfy[1];
fy_local[2] = hgfy[2];
fy_local[3] = hgfy[3];
fy_local[4] = hgfy[4];
fy_local[5] = hgfy[5];
fy_local[6] = hgfy[6];
fy_local[7] = hgfy[7];
fz_local = &fz_elem[i3] ;
fz_local[0] = hgfz[0];
fz_local[1] = hgfz[1];
fz_local[2] = hgfz[2];
fz_local[3] = hgfz[3];
fz_local[4] = hgfz[4];
fz_local[5] = hgfz[5];
fz_local[6] = hgfz[6];
fz_local[7] = hgfz[7];
}
else {
domain.fx(n0si2) += hgfx[0];
domain.fy(n0si2) += hgfy[0];
domain.fz(n0si2) += hgfz[0];
domain.fx(n1si2) += hgfx[1];
domain.fy(n1si2) += hgfy[1];
domain.fz(n1si2) += hgfz[1];
domain.fx(n2si2) += hgfx[2];
domain.fy(n2si2) += hgfy[2];
domain.fz(n2si2) += hgfz[2];
domain.fx(n3si2) += hgfx[3];
domain.fy(n3si2) += hgfy[3];
domain.fz(n3si2) += hgfz[3];
domain.fx(n4si2) += hgfx[4];
domain.fy(n4si2) += hgfy[4];
domain.fz(n4si2) += hgfz[4];
domain.fx(n5si2) += hgfx[5];
domain.fy(n5si2) += hgfy[5];
domain.fz(n5si2) += hgfz[5];
domain.fx(n6si2) += hgfx[6];
domain.fy(n6si2) += hgfy[6];
domain.fz(n6si2) += hgfz[6];
domain.fx(n7si2) += hgfx[7];
domain.fy(n7si2) += hgfy[7];
domain.fz(n7si2) += hgfz[7];
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem, hourg)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
if (numthreads > 1) {
// Collect the data from the local arrays into the final force arrays
<LOOP-START>for( Index_t gnode=0 ; gnode<numNode ; ++gnode )
{
Index_t count = domain.nodeElemCount(gnode) ;
Index_t *cornerList = domain.nodeElemCornerList(gnode) ;
Real_t fx_tmp = Real_t(0.0) ;
Real_t fy_tmp = Real_t(0.0) ;
Real_t fz_tmp = Real_t(0.0) ;
for (Index_t i=0 ; i < count ; ++i) {
Index_t elem = cornerList[i] ;
fx_tmp += fx_elem[elem] ;
fy_tmp += fy_elem[elem] ;
fz_tmp += fz_elem[elem] ;
}
domain.fx(gnode) += fx_tmp ;
domain.fy(gnode) += fy_tmp ;
domain.fz(gnode) += fz_tmp ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
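The two lulesh.cc records above are the two halves of a common OpenMP
force-assembly pattern: when numthreads > 1, the element loop scatters its
eight nodal contributions into a private slice of an element-sized scratch
array (fx_elem/fy_elem/fz_elem), so no two iterations ever write the same
slot, and a separate node loop then gathers those corners back through
nodeElemCornerList. Neither loop needs atomics. A minimal self-contained
sketch of the same idea on a hypothetical 1-D mesh (element e touches nodes
e and e+1; all names here are illustrative, not LULESH's):

    #include <cstdio>
    #include <vector>

    int main() {
        int numElem = 8;
        int numNode = numElem + 1;
        std::vector<double> f_elem(2 * numElem);   // per-element scratch, one slot per corner
        std::vector<double> f_node(numNode, 0.0);

        // Pass 1: each iteration writes only its own slice -> no races, no atomics.
    #pragma omp parallel for firstprivate(numElem)
        for (int e = 0; e < numElem; ++e) {
            double fe = 1.0 + e;                   // stand-in for a computed element force
            f_elem[2*e + 0] = 0.5 * fe;
            f_elem[2*e + 1] = 0.5 * fe;
        }

        // Pass 2: each node gathers the corner contributions that belong to it.
    #pragma omp parallel for firstprivate(numNode)
        for (int n = 0; n < numNode; ++n) {
            double sum = 0.0;
            if (n > 0)       sum += f_elem[2*(n-1) + 1];   // right corner of element n-1
            if (n < numElem) sum += f_elem[2*n + 0];       // left corner of element n
            f_node[n] += sum;
        }

        for (int n = 0; n < numNode; ++n)
            std::printf("f_node[%d] = %g\n", n, f_node[n]);
        return 0;
    }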
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
al_t>(numElem8) ;
Real_t *z8n = Allocate<Real_t>(numElem8) ;
/* start loop over elements */
<LOOP-START>for (Index_t i=0 ; i<numElem ; ++i){
Real_t x1[8], y1[8], z1[8] ;
Real_t pfx[8], pfy[8], pfz[8] ;
Index_t* elemToNode = domain.nodelist(i);
CollectDomainNodesToElemNodes(domain, elemToNode, x1, y1, z1);
CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1);
/* load into temporary storage for FB Hour Glass control */
for(Index_t ii=0;ii<8;++ii){
Index_t jj=8*i+ii;
dvdx[jj] = pfx[ii];
dvdy[jj] = pfy[ii];
dvdz[jj] = pfz[ii];
x8n[jj] = x1[ii];
y8n[jj] = y1[ii];
z8n[jj] = z1[ii];
}
determ[i] = domain.volo(i) * domain.v(i);
/* Do a check for negative volumes */
if ( domain.v(i) <= Real_t(0.0) ) {
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
exit(VolumeError);
#endif
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
lem,
domain.numNode()) ;
// check for negative element volume
<LOOP-START>for ( Index_t k=0 ; k<numElem ; ++k ) {
if (determ[k] <= Real_t(0.0)) {
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
exit(VolumeError);
#endif
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
domain.sizeX() + 1, domain.sizeY() + 1, domain.sizeZ() + 1,
true, false) ;
#endif
<LOOP-START>for (Index_t i=0; i<numNode; ++i) {
domain.fx(i) = Real_t(0.0) ;
domain.fy(i) = Real_t(0.0) ;
domain.fz(i) = Real_t(0.0) ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
**************/
static inline
void CalcAccelerationForNodes(Domain &domain, Index_t numNode)
{
<LOOP-START>for (Index_t i = 0; i < numNode; ++i) {
domain.xdd(i) = domain.fx(i) / domain.nodalMass(i);
domain.ydd(i) = domain.fy(i) / domain.nodalMass(i);
domain.zdd(i) = domain.fz(i) / domain.nodalMass(i);
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
(Domain &domain, const Real_t dt, const Real_t u_cut,
Index_t numNode)
{
<LOOP-START>for ( Index_t i = 0 ; i < numNode ; ++i )
{
Real_t xdtmp, ydtmp, zdtmp ;
xdtmp = domain.xd(i) + domain.xdd(i) * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = Real_t(0.0);
domain.xd(i) = xdtmp ;
ydtmp = domain.yd(i) + domain.ydd(i) * dt ;
if( FABS(ydtmp) < u_cut ) ydtmp = Real_t(0.0);
domain.yd(i) = ydtmp ;
zdtmp = domain.zd(i) + domain.zdd(i) * dt ;
if( FABS(zdtmp) < u_cut ) zdtmp = Real_t(0.0);
domain.zd(i) = zdtmp ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numNode)
| 100
|
*****/
static inline
void CalcPositionForNodes(Domain &domain, const Real_t dt, Index_t numNode)
{
<LOOP-START>for ( Index_t i = 0 ; i < numNode ; ++i )
{
domain.x(i) += domain.xd(i) * dt ;
domain.y(i) += domain.yd(i) * dt ;
domain.z(i) += domain.zd(i) * dt ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numNode)<OMP-END>
|
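Taken together, the three nodal loops in the records above form one explicit
time-integration step: acceleration from force and nodal mass (a = F/m),
velocity from acceleration (v += a*dt, with any component whose magnitude
falls below u_cut snapped to exactly zero so numerical noise is not carried
forward), and position from velocity (x += v*dt). A condensed sketch of the
same sequence, fused into one loop over flat arrays (LULESH keeps the three
loops separate; the fusion here is only for illustration):

    #include <cmath>
    #include <vector>

    void integrate_step(std::vector<double>& x, std::vector<double>& xd,
                        const std::vector<double>& fx,
                        const std::vector<double>& mass,
                        double dt, double u_cut) {
        int n = (int)x.size();
    #pragma omp parallel for firstprivate(n, dt, u_cut)
        for (int i = 0; i < n; ++i) {
            double a = fx[i] / mass[i];          // CalcAccelerationForNodes
            double v = xd[i] + a * dt;           // CalcVelocityForNodes
            if (std::fabs(v) < u_cut) v = 0.0;   // u_cut snap-to-zero
            xd[i] = v;
            x[i] += v * dt;                      // CalcPositionForNodes
        }
    }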
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem, deltaTime)
| 100
|
w,
Real_t deltaTime, Index_t numElem )
{
// loop over all elements
<LOOP-START>for( Index_t k=0 ; k<numElem ; ++k )
{
Real_t B[3][8] ; /** shape function derivatives */
Real_t D[6] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t detJ = Real_t(0.0) ;
Real_t volume ;
Real_t relativeVolume ;
const Index_t* const elemToNode = domain.nodelist(k) ;
// get nodal coordinates from global arrays and copy into local arrays.
CollectDomainNodesToElemNodes(domain, elemToNode, x_local, y_local, z_local);
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / domain.volo(k) ;
vnew[k] = relativeVolume ;
domain.delv(k) = relativeVolume - domain.v(k) ;
// set characteristic length
domain.arealg(k) = CalcElemCharacteristicLength(x_local, y_local, z_local,
volume);
// get nodal velocities from global array and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = elemToNode[lnode];
xd_local[lnode] = domain.xd(gnode);
yd_local[lnode] = domain.yd(gnode);
zd_local[lnode] = domain.zd(gnode);
}
Real_t dt2 = Real_t(0.5) * deltaTime;
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
CalcElemShapeFunctionDerivatives( x_local, y_local, z_local,
B, &detJ );
CalcElemVelocityGradient( xd_local, yd_local, zd_local,
B, detJ, D );
// put velocity gradient quantities into their global arrays.
domain.dxx(k) = D[0];
domain.dyy(k) = D[1];
domain.dzz(k) = D[2];
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem, deltaTime)<OMP-END>
|
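Note the half-step pull-back in the kinematics record above: after the new
nodal coordinates are collected, the loop subtracts dt2 = 0.5*deltaTime times
the nodal velocity from each local coordinate before taking the
shape-function derivatives. In other words, it evaluates the velocity
gradient D at the mid-step configuration x^{n+1/2} = x^{n+1} - (dt/2)*v
rather than at the end of the step, the usual midpoint choice for
second-order accuracy.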
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
deltatime, numElem) ;
// element loop to do some stuff not included in the elemlib function.
<LOOP-START>for ( Index_t k=0 ; k<numElem ; ++k )
{
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdov = domain.dxx(k) + domain.dyy(k) + domain.dzz(k) ;
Real_t vdovthird = vdov/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
domain.vdov(k) = vdov ;
domain.dxx(k) -= vdovthird ;
domain.dyy(k) -= vdovthird ;
domain.dzz(k) -= vdovthird ;
// See if any volumes are negative, and take appropriate action.
if (vnew[k] <= Real_t(0.0))
{
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
exit(VolumeError);
#endif
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
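The element loop above stores the trace of the velocity-gradient tensor as
vdov = dxx + dyy + dzz (the volumetric strain rate) and then subtracts
vdov/3 from each diagonal entry, leaving a traceless, purely deviatoric
diagonal. The same split as a free-standing sketch (names illustrative):

    // Volumetric/deviatoric split of the diagonal of the rate-of-deformation tensor.
    void make_deviatoric(double& dxx, double& dyy, double& dzz, double& vdov) {
        vdov = dxx + dyy + dzz;            // trace = volumetric part
        double third = vdov / 3.0;
        dxx -= third;                      // what remains sums to zero
        dyy -= third;
        dzz -= third;
    }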
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numElem)
| 100
|
onotonicQGradientsForElems(Domain& domain, Real_t vnew[])
{
Index_t numElem = domain.numElem();
<LOOP-START>for (Index_t i = 0 ; i < numElem ; ++i ) {
const Real_t ptiny = Real_t(1.e-36) ;
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
const Index_t *elemToNode = domain.nodelist(i);
Index_t n0 = elemToNode[0] ;
Index_t n1 = elemToNode[1] ;
Index_t n2 = elemToNode[2] ;
Index_t n3 = elemToNode[3] ;
Index_t n4 = elemToNode[4] ;
Index_t n5 = elemToNode[5] ;
Index_t n6 = elemToNode[6] ;
Index_t n7 = elemToNode[7] ;
Real_t x0 = domain.x(n0) ;
Real_t x1 = domain.x(n1) ;
Real_t x2 = domain.x(n2) ;
Real_t x3 = domain.x(n3) ;
Real_t x4 = domain.x(n4) ;
Real_t x5 = domain.x(n5) ;
Real_t x6 = domain.x(n6) ;
Real_t x7 = domain.x(n7) ;
Real_t y0 = domain.y(n0) ;
Real_t y1 = domain.y(n1) ;
Real_t y2 = domain.y(n2) ;
Real_t y3 = domain.y(n3) ;
Real_t y4 = domain.y(n4) ;
Real_t y5 = domain.y(n5) ;
Real_t y6 = domain.y(n6) ;
Real_t y7 = domain.y(n7) ;
Real_t z0 = domain.z(n0) ;
Real_t z1 = domain.z(n1) ;
Real_t z2 = domain.z(n2) ;
Real_t z3 = domain.z(n3) ;
Real_t z4 = domain.z(n4) ;
Real_t z5 = domain.z(n5) ;
Real_t z6 = domain.z(n6) ;
Real_t z7 = domain.z(n7) ;
Real_t xv0 = domain.xd(n0) ;
Real_t xv1 = domain.xd(n1) ;
Real_t xv2 = domain.xd(n2) ;
Real_t xv3 = domain.xd(n3) ;
Real_t xv4 = domain.xd(n4) ;
Real_t xv5 = domain.xd(n5) ;
Real_t xv6 = domain.xd(n6) ;
Real_t xv7 = domain.xd(n7) ;
Real_t yv0 = domain.yd(n0) ;
Real_t yv1 = domain.yd(n1) ;
Real_t yv2 = domain.yd(n2) ;
Real_t yv3 = domain.yd(n3) ;
Real_t yv4 = domain.yd(n4) ;
Real_t yv5 = domain.yd(n5) ;
Real_t yv6 = domain.yd(n6) ;
Real_t yv7 = domain.yd(n7) ;
Real_t zv0 = domain.zd(n0) ;
Real_t zv1 = domain.zd(n1) ;
Real_t zv2 = domain.zd(n2) ;
Real_t zv3 = domain.zd(n3) ;
Real_t zv4 = domain.zd(n4) ;
Real_t zv5 = domain.zd(n5) ;
Real_t zv6 = domain.zd(n6) ;
Real_t zv7 = domain.zd(n7) ;
Real_t vol = domain.volo(i)*vnew[i] ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*((x0+x1+x5+x4) - (x3+x2+x6+x7)) ;
Real_t dyj = Real_t(-0.25)*((y0+y1+y5+y4) - (y3+y2+y6+y7)) ;
Real_t dzj = Real_t(-0.25)*((z0+z1+z5+z4) - (z3+z2+z6+z7)) ;
Real_t dxi = Real_t( 0.25)*((x1+x2+x6+x5) - (x0+x3+x7+x4)) ;
Real_t dyi = Real_t( 0.25)*((y1+y2+y6+y5) - (y0+y3+y7+y4)) ;
Real_t dzi = Real_t( 0.25)*((z1+z2+z6+z5) - (z0+z3+z7+z4)) ;
Real_t dxk = Real_t( 0.25)*((x4+x5+x6+x7) - (x0+x1+x2+x3)) ;
Real_t dyk = Real_t( 0.25)*((y4+y5+y6+y7) - (y0+y1+y2+y3)) ;
Real_t dzk = Real_t( 0.25)*((z4+z5+z6+z7) - (z0+z1+z2+z3)) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
domain.delx_zeta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*((xv4+xv5+xv6+xv7) - (xv0+xv1+xv2+xv3)) ;
dyv = Real_t(0.25)*((yv4+yv5+yv6+yv7) - (yv0+yv1+yv2+yv3)) ;
dzv = Real_t(0.25)*((zv4+zv5+zv6+zv7) - (zv0+zv1+zv2+zv3)) ;
domain.delv_zeta(i) = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
domain.delx_xi(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*((xv1+xv2+xv6+xv5) - (xv0+xv3+xv7+xv4)) ;
dyv = Real_t(0.25)*((yv1+yv2+yv6+yv5) - (yv0+yv3+yv7+yv4)) ;
dzv = Real_t(0.25)*((zv1+zv2+zv6+zv5) - (zv0+zv3+zv7+zv4)) ;
domain.delv_xi(i) = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
domain.delx_eta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*((xv0+xv1+xv5+xv4) - (xv3+xv2+xv6+xv7)) ;
dyv = Real_t(-0.25)*((yv0+yv1+yv5+yv4) - (yv3+yv2+yv6+yv7)) ;
dzv = Real_t(-0.25)*((zv0+zv1+zv5+zv4) - (zv3+zv2+zv6+zv7)) ;
domain.delv_eta(i) = ax*dxv + ay*dyv + az*dzv ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)
| 100
|
q_max_slope();
Real_t qlc_monoq = domain.qlc_monoq();
Real_t qqc_monoq = domain.qqc_monoq();
<LOOP-START>for ( Index_t ielem = 0 ; ielem < domain.regElemSize(r); ++ielem ) {
Index_t i = domain.regElemlist(r,ielem);
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Int_t bcMask = domain.elemBC(i) ;
Real_t delvm = 0.0, delvp =0.0;
/* phixi */
Real_t norm = Real_t(1.) / (domain.delv_xi(i)+ ptiny ) ;
switch (bcMask & XI_M) {
case XI_M_COMM: /* needs comm data */
case 0: delvm = domain.delv_xi(domain.lxim(i)); break ;
case XI_M_SYMM: delvm = domain.delv_xi(i) ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvm = 0; /* ERROR - but quiets the compiler */
break;
}
switch (bcMask & XI_P) {
case XI_P_COMM: /* needs comm data */
case 0: delvp = domain.delv_xi(domain.lxip(i)) ; break ;
case XI_P_SYMM: delvp = domain.delv_xi(i) ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvp = 0; /* ERROR - but quiets the compiler */
break;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = Real_t(1.) / ( domain.delv_eta(i) + ptiny ) ;
switch (bcMask & ETA_M) {
case ETA_M_COMM: /* needs comm data */
case 0: delvm = domain.delv_eta(domain.letam(i)) ; break ;
case ETA_M_SYMM: delvm = domain.delv_eta(i) ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvm = 0; /* ERROR - but quiets the compiler */
break;
}
switch (bcMask & ETA_P) {
case ETA_P_COMM: /* needs comm data */
case 0: delvp = domain.delv_eta(domain.letap(i)) ; break ;
case ETA_P_SYMM: delvp = domain.delv_eta(i) ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvp = 0; /* ERROR - but quiets the compiler */
break;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = Real_t(1.) / ( domain.delv_zeta(i) + ptiny ) ;
switch (bcMask & ZETA_M) {
case ZETA_M_COMM: /* needs comm data */
case 0: delvm = domain.delv_zeta(domain.lzetam(i)) ; break ;
case ZETA_M_SYMM: delvm = domain.delv_zeta(i) ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvm = 0; /* ERROR - but quiets the compiler */
break;
}
switch (bcMask & ZETA_P) {
case ZETA_P_COMM: /* needs comm data */
case 0: delvp = domain.delv_zeta(domain.lzetap(i)) ; break ;
case ZETA_P_SYMM: delvp = domain.delv_zeta(i) ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: fprintf(stderr, "Error in switch at %s line %d\n",
__FILE__, __LINE__);
delvp = 0; /* ERROR - but quiets the compiler */
break;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( domain.vdov(i) > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
Real_t delvxxi = domain.delv_xi(i) * domain.delx_xi(i) ;
Real_t delvxeta = domain.delv_eta(i) * domain.delx_eta(i) ;
Real_t delvxzeta = domain.delv_zeta(i) * domain.delx_zeta(i) ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = domain.elemMass(i) / (domain.volo(i) * vnew[i]) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
domain.qq(i) = qquad ;
domain.ql(i) = qlin ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)<OMP-END>
|
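The long region loop above applies one and the same monotonic limiter in each
of the three logical directions (xi, eta, zeta): from the two neighboring
normalized velocity gradients it forms phi = 0.5*(delvm + delvp), then caps
it by monoq_limiter_mult*delvm, monoq_limiter_mult*delvp, zero, and
monoq_max_slope. Only compressing elements (vdov <= 0) receive artificial
viscosity, built from a qlc_monoq (linear) and a qqc_monoq (quadratic) term
damped by (1 - phi) and (1 - phi^2) respectively. The per-direction limiter
as a free-standing sketch:

    #include <algorithm>

    // Monotonic slope limiter, applied once per logical direction.
    double limit_phi(double delvm, double delvp,
                     double limiter_mult, double max_slope) {
        double phi = 0.5 * (delvm + delvp);
        phi = std::min(phi, limiter_mult * delvm);
        phi = std::min(phi, limiter_mult * delvp);
        phi = std::max(phi, 0.0);
        return std::min(phi, max_slope);
    }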
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(length)
| 100
|
Real_t p_cut, Real_t eosvmax,
Index_t length, Index_t *regElemList)
{
<LOOP-START>for (Index_t i = 0; i < length ; ++i) {
Real_t c1s = Real_t(2.0)/Real_t(3.0) ;
bvc[i] = c1s * (compression[i] + Real_t(1.));
pbvc[i] = c1s;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)
| 100
|
t(2.0)/Real_t(3.0) ;
bvc[i] = c1s * (compression[i] + Real_t(1.));
pbvc[i] = c1s;
}
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i){
Index_t elem = regElemList[i];
p_new[i] = bvc[i] * e_old[i] ;
if (FABS(p_new[i]) < p_cut )
p_new[i] = Real_t(0.0) ;
if ( vnewc[elem] >= eosvmax ) /* impossible condition here? */
p_new[i] = Real_t(0.0) ;
if (p_new[i] < pmin)
p_new[i] = pmin ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(length, emin)
| 100
|
Index_t length, Index_t *regElemList)
{
Real_t *pHalfStep = Allocate<Real_t>(length) ;
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i) {
e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i])
+ Real_t(0.5) * work[i];
if (e_new[i] < emin ) {
e_new[i] = emin ;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, emin)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(length, rho0)
| 100
|
c, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax, length, regElemList);
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i) {
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]) ;
if ( delvc[i] > Real_t(0.) ) {
q_new[i] /* = qq_old[i] = ql_old[i] */ = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;
}
e_new[i] = e_new[i] + Real_t(0.5) * delvc[i]
* ( Real_t(3.0)*(p_old[i] + q_old[i])
- Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, rho0)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(length, emin, e_cut)
| 100
|
eal_t(3.0)*(p_old[i] + q_old[i])
- Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;
}
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i) {
e_new[i] += Real_t(0.5) * work[i];
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, emin, e_cut)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(length, rho0, emin, e_cut)
| 100
|
vc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length, regElemList);
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i){
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Index_t elem = regElemList[i];
Real_t q_tilde ;
if (delvc[i] > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql_old[i] + qq_old[i]) ;
}
e_new[i] = e_new[i] - ( Real_t(7.0)*(p_old[i] + q_old[i])
- Real_t(8.0)*(pHalfStep[i] + q_new[i])
+ (p_new[i] + q_tilde)) * delvc[i]*sixth ;
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, rho0, emin, e_cut)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(length, rho0, q_cut)
| 100
|
vc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length, regElemList);
<LOOP-START>for (Index_t i = 0 ; i < length ; ++i){
Index_t elem = regElemList[i];
if ( delvc[i] <= Real_t(0.) ) {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;
if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.) ;
}
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, rho0, q_cut)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(rho0, ss4o3)
| 100
|
Real_t *bvc, Real_t ss4o3,
Index_t len, Index_t *regElemList)
{
<LOOP-START>for (Index_t i = 0; i < len ; ++i) {
Index_t elem = regElemList[i];
Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[elem] * vnewc[elem] *
bvc[i] * pnewc[i]) / rho0;
if (ssTmp <= Real_t(.1111111e-36)) {
ssTmp = Real_t(.3333333e-18);
}
else {
ssTmp = SQRT(ssTmp);
}
domain.ss(elem) = ssTmp ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(rho0, ss4o3)<OMP-END>
|
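Four of the EOS records above share the same guarded sound-speed kernel:
ssc^2 = (pbvc*e + v^2*bvc*p)/rho0, floored at .1111111e-36 so that the value
after the square root is at least .3333333e-18, a denormal-safe numerical
floor rather than a physical one. The guard, extracted as a sketch:

    #include <cmath>

    // Sound speed with the LULESH-style tiny-value floor.
    double guarded_ssc(double pbvc, double e, double v,
                       double bvc, double p, double rho0) {
        double ssc2 = (pbvc * e + v * v * bvc * p) / rho0;
        return (ssc2 <= .1111111e-36) ? .3333333e-18 : std::sqrt(ssc2);
    }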
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(numElemReg)
| 100
|
qq_old, ql_old, rho0, eosvmax,
numElemReg, regElemList);
}
<LOOP-START>for (Index_t i=0; i<numElemReg; ++i) {
Index_t elem = regElemList[i];
domain.p(elem) = p_new[i] ;
domain.e(elem) = e_new[i] ;
domain.q(elem) = q_new[i] ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElemReg)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/lulesh-2.0.3/lulesh.cc
|
#pragma omp parallel for firstprivate(length, v_cut)
| 100
|
ain, Real_t *vnew,
Real_t v_cut, Index_t length)
{
if (length != 0) {
<LOOP-START>for(Index_t i=0 ; i<length ; ++i) {
Real_t tmpV = vnew[i] ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
domain.v(i) = tmpV ;
}<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(length, v_cut)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/perform_element_loop.hpp
|
#pragma omp parallel for shared (elemIDs)
| 100
|
iter.x, iter.y, iter.z);
}
timer_type t_gn = 0, t_ce = 0, t_si = 0;
timer_type t0 = 0;
<LOOP-START>for(MINIFE_GLOBAL_ORDINAL i=0; i < elemIDs.size(); ++i) {
ElemData<GlobalOrdinal,Scalar> elem_data;
compute_gradient_values(elem_data.grad_vals);
get_elem_nodes_and_coords(mesh, elemIDs[i], elem_data);
compute_element_matrix_and_vector(elem_data);
sum_into_global_linear_system(elem_data, A, b);
}<LOOP-END> <OMP-START>#pragma omp parallel for shared (elemIDs)<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/generate_matrix_structure.hpp
|
#pragma omp parallel for
| 100
|
ffset_ptr = &row_offsets[0];
MINIFE_LOCAL_ORDINAL* const row_coords_ptr = &row_coords[0];
<LOOP-START>for(int r = 0; r < r_n; ++r) {
int iz = r / (xy_width) + box[2][0];
int iy = (r / x_width) % y_width + box[1][0];
int ix = r % x_width + box[0][0];
GlobalOrdinal row_id =
get_id<GlobalOrdinal>(global_nodes_x, global_nodes_y, global_nodes_z,
ix, iy, iz);
row_ptr[r] = mesh.map_id_to_row(row_id);
row_coords_ptr[r*3] = ix;
row_coords_ptr[r*3+1] = iy;
row_coords_ptr[r*3+2] = iz;
MINIFE_LOCAL_ORDINAL nnz = 0;
for(int sz=-1; sz<=1; ++sz) {
for(int sy=-1; sy<=1; ++sy) {
for(int sx=-1; sx<=1; ++sx) {
GlobalOrdinal col_id =
get_id<GlobalOrdinal>(global_nodes_x, global_nodes_y, global_nodes_z,
ix+sx, iy+sy, iz+sz);
if (col_id >= 0 && col_id < global_nrows) {
++nnz;
}
}
}
}
row_offset_ptr[r+1] = nnz;
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
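The generate_matrix_structure loop above only counts nonzeros: for each local
row r it tests all 27 stencil neighbors against the global row range and
stores the count into row_offsets[r+1]. Turning those per-row counts into CSR
row offsets takes a separate prefix sum, which (an assumption here) the
surrounding miniFE code performs after this loop; assuming row_offsets[0] was
zero-initialized, the scan is one call:

    #include <numeric>
    #include <vector>

    // row_offsets[0] == 0 and row_offsets[r+1] == nnz of row r, as filled above.
    void counts_to_offsets(std::vector<int>& row_offsets) {
        // After the scan, [row_offsets[r], row_offsets[r+1]) brackets row r's entries.
        std::partial_sum(row_offsets.begin(), row_offsets.end(), row_offsets.begin());
    }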
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/exchange_externals.hpp
|
#pragma omp parallel for
| 100
|
.size();
#ifdef MINIFE_DEBUG
os << "total_to_be_sent: " << total_to_be_sent << std::endl;
#endif
<LOOP-START>for(size_t i=0; i<total_to_be_sent; ++i) {
#ifdef MINIFE_DEBUG
//expensive index range-check:
if (elements_to_send[i] < 0 || elements_to_send[i] > x.coefs.size()) {
os << "error, out-of-range. x.coefs.size()=="<<x.coefs.size()<<", elements_to_send[i]=="<<elements_to_send[i]<<std::endl;
}
#endif
send_buffer[i] = x.coefs[elements_to_send[i]];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp
|
#pragma omp parallel for
| 100
|
y.coefs[0];
ScalarType* wcoefs = &w.coefs[0];
if(beta == 0.0) {
if(alpha == 1.0) {
<LOOP-START>for(int i=0; i<n; ++i) {
wcoefs[i] = xcoefs[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp
|
#pragma omp parallel for
| 100
|
a omp parallel for
for(int i=0; i<n; ++i) {
wcoefs[i] = xcoefs[i];
}
} else {
<LOOP-START>for(int i=0; i<n; ++i) {
wcoefs[i] = alpha * xcoefs[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp
|
#pragma omp parallel for
| 100
|
=0; i<n; ++i) {
wcoefs[i] = alpha * xcoefs[i];
}
}
} else {
if(alpha == 1.0) {
<LOOP-START>for(int i=0; i<n; ++i) {
wcoefs[i] = xcoefs[i] + beta * ycoefs[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp
|
#pragma omp parallel for
| 100
|
for(int i=0; i<n; ++i) {
wcoefs[i] = xcoefs[i] + beta * ycoefs[i];
}
} else {
<LOOP-START>for(int i=0; i<n; ++i) {
wcoefs[i] = alpha * xcoefs[i] + beta * ycoefs[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp
|
#pragma omp parallel for
| 100
|
&x.coefs[0];
MINIFE_SCALAR* ycoefs = &y.coefs[0];
if(alpha == 1.0 && beta == 1.0) {
<LOOP-START>for(int i = 0; i < n; ++i) {
ycoefs[i] += xcoefs[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp
|
#pragma omp parallel for
| 100
|
for(int i = 0; i < n; ++i) {
ycoefs[i] += xcoefs[i];
}
} else if (beta == 1.0) {
<LOOP-START>for(int i = 0; i < n; ++i) {
ycoefs[i] += alpha * xcoefs[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp
|
#pragma omp parallel for
| 100
|
nt i = 0; i < n; ++i) {
ycoefs[i] += alpha * xcoefs[i];
}
} else if (alpha == 1.0) {
<LOOP-START>for(int i = 0; i < n; ++i) {
ycoefs[i] = xcoefs[i] + beta * ycoefs[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp
|
#pragma omp parallel for
| 100
|
i < n; ++i) {
ycoefs[i] = xcoefs[i] + beta * ycoefs[i];
}
} else if (beta == 0.0) {
<LOOP-START>for(int i = 0; i < n; ++i) {
ycoefs[i] = alpha * xcoefs[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/cea-hpc/pcvs-benchmarks/applications/Corals/miniFe/src/Vector_functions.hpp
|
#pragma omp parallel for
| 100
|
rallel for
for(int i = 0; i < n; ++i) {
ycoefs[i] = alpha * xcoefs[i];
}
} else {
<LOOP-START>for(int i = 0; i < n; ++i) {
ycoefs[i] = alpha * xcoefs[i] + beta * ycoefs[i];
}<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
|
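The Vector_functions.hpp records above are all specializations of a single
kernel, w = alpha*x + beta*y (plus its in-place variant updating y): miniFE
branches on alpha and beta being exactly 0.0 or 1.0 so the frequent special
cases skip multiplications entirely. The general fallback, as a
free-standing sketch:

    #include <vector>

    // w = alpha*x + beta*y; the 0/1 branches above are strength-reduced
    // forms of this one loop.
    void waxpby(double alpha, const std::vector<double>& x,
                double beta,  const std::vector<double>& y,
                std::vector<double>& w) {
        int n = (int)x.size();
    #pragma omp parallel for
        for (int i = 0; i < n; ++i)
            w[i] = alpha * x[i] + beta * y[i];
    }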