filename: string (length 78 to 241)
omp_pragma_line: string (length 24 to 416)
context_chars: int64 (value 100 to 100)
text: string (length 152 to 177k)
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DES_bs.c
#pragma omp parallel for if(n >= 96) default(none) private(value, mask, bit, b, depth, t) shared(n, DES_bs_all_p, retval, binary)
100
(DES_BS_DEPTH - 1)) / DES_BS_DEPTH; #endif int retval = 0; #if defined(_OPENMP) && DES_BS_VECTOR <LOOP-START>#elif defined(_OPENMP) #pragma omp parallel for if(n >= 96) default(none) private(value, mask, bit, b, t) shared(n, DES_bs_all_p, retval, binary) for_each_t(n) for_each_depth() { value = binary[0]; b = (DES_bs_vector *)&DES_bs_all.B[0] DEPTH; mask = b[0] START ^ -(value & 1); mask |= b[1] START ^ -((value >> 1) & 1); mask |= b[2] START ^ -((value >> 2) & 1); mask |= b[3] START ^ -((value >> 3) & 1); if (mask == ~(ARCH_WORD)0) goto next_depth; value >>= 4; b += 4; for (bit = 4; bit < 32; bit += 2) { mask |= b[0] START ^ -(value & 1); if (mask == ~(ARCH_WORD)0) goto next_depth; mask |= b[1] START ^ -((value >> 1) & 1); if (mask == ~(ARCH_WORD)0) goto next_depth; value >>= 2; b += 2; } #ifdef _OPENMP retval = 1; #else return 1; next_depth: ; }<LOOP-END> <OMP-START>#pragma omp parallel for if(n >= 96) default(none) private(value, mask, bit, b, depth, t) shared(n, DES_bs_all_p, retval, binary)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/DES_bs.c
#pragma omp parallel for if(n >= 96) default(none) private(value, mask, bit, b, t) shared(n, DES_bs_all_p, retval, binary)
100
ivate(value, mask, bit, b, depth, t) shared(n, DES_bs_all_p, retval, binary) #elif defined(_OPENMP) <LOOP-START>for_each_t(n) for_each_depth() { value = binary[0]; b = (DES_bs_vector *)&DES_bs_all.B[0] DEPTH; mask = b[0] START ^ -(value & 1); mask |= b[1] START ^ -((value >> 1) & 1); mask |= b[2] START ^ -((value >> 2) & 1); mask |= b[3] START ^ -((value >> 3) & 1); if (mask == ~(ARCH_WORD)0) goto next_depth; value >>= 4; b += 4; for (bit = 4; bit < 32; bit += 2) { mask |= b[0] START ^ -(value & 1); if (mask == ~(ARCH_WORD)0) goto next_depth; mask |= b[1] START ^ -((value >> 1) & 1); if (mask == ~(ARCH_WORD)0) goto next_depth; value >>= 2; b += 2; } #ifdef _OPENMP retval = 1; #else return 1; next_depth: ; }<LOOP-END> <OMP-START>#pragma omp parallel for if(n >= 96) default(none) private(value, mask, bit, b, t) shared(n, DES_bs_all_p, retval, binary)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mscash1_fmt_plug.c
#pragma omp parallel for default(none) private(i) shared(count, ms_buffer1x, crypt_out, last)
100
output1x[4*index+3] & PH_MASK_6; } static void nt_hash(int count) { int i; #if defined(_OPENMP) <LOOP-START>for (i = 0; i < count; i++) { unsigned int a; unsigned int b; unsigned int c; unsigned int d; /* Round 1 */ a = 0xFFFFFFFF + ms_buffer1x[16*i+0];a = (a << 3 ) | (a >> 29); d = INIT_D + (INIT_C ^ (a & 0x77777777)) + ms_buffer1x[16*i+1];d = (d << 7 ) | (d >> 25); c = INIT_C + (INIT_B ^ (d & (a ^ INIT_B)))+ ms_buffer1x[16*i+2];c = (c << 11) | (c >> 21); b = INIT_B + (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+3];b = (b << 19) | (b >> 13); a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+4] ;a = (a << 3 ) | (a >> 29); d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+5] ;d = (d << 7 ) | (d >> 25); c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+6] ;c = (c << 11) | (c >> 21); b += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+7] ;b = (b << 19) | (b >> 13); a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+8] ;a = (a << 3 ) | (a >> 29); d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+9] ;d = (d << 7 ) | (d >> 25); c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+10] ;c = (c << 11) | (c >> 21); b += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+11] ;b = (b << 19) | (b >> 13); a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+12] ;a = (a << 3 ) | (a >> 29); d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+13] ;d = (d << 7 ) | (d >> 25); c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+14] ;c = (c << 11) | (c >> 21); b += (a ^ (c & (d ^ a)))/*+ms_buffer1x[16*i+15]*/;b = (b << 19) | (b >> 13); /* Round 2 */ a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+0] + SQRT_2; a = (a << 3 ) | (a >> 29); d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+4] + SQRT_2; d = (d << 5 ) | (d >> 27); c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+8] + SQRT_2; c = (c << 9 ) | (c >> 23); b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+12] + SQRT_2; b = (b << 13) | (b >> 19); a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+1] + SQRT_2; a = (a << 3 ) | (a >> 29); d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+5] + SQRT_2; d = (d << 5 ) | (d >> 27); c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+9] + SQRT_2; c = (c << 9 ) | (c >> 23); b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+13] + SQRT_2; b = (b << 13) | (b >> 19); a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+2] + SQRT_2; a = (a << 3 ) | (a >> 29); d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+6] + SQRT_2; d = (d << 5 ) | (d >> 27); c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+10] + SQRT_2; c = (c << 9 ) | (c >> 23); b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+14] + SQRT_2; b = (b << 13) | (b >> 19); a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+3] + SQRT_2; a = (a << 3 ) | (a >> 29); d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+7] + SQRT_2; d = (d << 5 ) | (d >> 27); c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+11] + SQRT_2; c = (c << 9 ) | (c >> 23); b += ((c & (d | a)) | (d & a))/*+ms_buffer1x[16*i+15]*/+SQRT_2; b = (b << 13) | (b >> 19); /* Round 3 */ a += (b ^ c ^ d) + ms_buffer1x[16*i+0] + SQRT_3; a = (a << 3 ) | (a >> 29); d += (a ^ b ^ c) + ms_buffer1x[16*i+8] + SQRT_3; d = (d << 9 ) | (d >> 23); c += (d ^ a ^ b) + ms_buffer1x[16*i+4] + SQRT_3; c = (c << 11) | (c >> 21); b += (c ^ d ^ a) + ms_buffer1x[16*i+12] + SQRT_3; b = (b << 15) | (b >> 17); a += (b ^ c ^ d) + ms_buffer1x[16*i+2] + SQRT_3; a = (a << 3 ) | (a >> 29); d += (a ^ b ^ c) + ms_buffer1x[16*i+10] + SQRT_3; d = (d << 9 ) | (d >> 23); c += (d ^ a ^ b) + ms_buffer1x[16*i+6] + SQRT_3; c = (c << 11) | (c >> 21); b += (c ^ d ^ a) + ms_buffer1x[16*i+14] + SQRT_3; b = (b << 15) | (b >> 17); a += (b ^ c ^ d) + 
ms_buffer1x[16*i+1] + SQRT_3; a = (a << 3 ) | (a >> 29); d += (a ^ b ^ c) + ms_buffer1x[16*i+9] + SQRT_3; d = (d << 9 ) | (d >> 23); c += (d ^ a ^ b) + ms_buffer1x[16*i+5] + SQRT_3; c = (c << 11) | (c >> 21); b += (c ^ d ^ a) + ms_buffer1x[16*i+13] + SQRT_3; b = (b << 15) | (b >> 17); a += (b ^ c ^ d) + ms_buffer1x[16*i+3] + SQRT_3; a = (a << 3 ) | (a >> 29); d += (a ^ b ^ c) + ms_buffer1x[16*i+11] + SQRT_3; d = (d << 9 ) | (d >> 23); c += (d ^ a ^ b) + ms_buffer1x[16*i+7] + SQRT_3; c = (c << 11) | (c >> 21); b += (c ^ d ^ a) /*+ ms_buffer1x[16*i+15] */+ SQRT_3; b = (b << 15) | (b >> 17); crypt_out[4*i+0] = a + INIT_A; crypt_out[4*i+1] = b + INIT_B; crypt_out[4*i+2] = c + INIT_C; crypt_out[4*i+3] = d + INIT_D; //Another MD4_crypt for the salt /* Round 1 */ a= 0xFFFFFFFF +crypt_out[4*i+0]; a=(a<<3 )|(a>>29); d=INIT_D + ( INIT_C ^ ( a & 0x77777777)) +crypt_out[4*i+1]; d=(d<<7 )|(d>>25); c=INIT_C + ( INIT_B ^ ( d & ( a ^ INIT_B))) +crypt_out[4*i+2]; c=(c<<11)|(c>>21); b=INIT_B + ( a ^ ( c & ( d ^ a ))) +crypt_out[4*i+3]; b=(b<<19)|(b>>13); last[4*i+0]=a; last[4*i+1]=b; last[4*i+2]=c; last[4*i+3]=d; }<LOOP-END> <OMP-START>#pragma omp parallel for default(none) private(i) shared(count, ms_buffer1x, crypt_out, last)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mscash1_fmt_plug.c
#pragma omp parallel for default(none) private(i) shared(count, last, crypt_out, salt_buffer, output1x)
100
count = *pcount; int i; if (new_key) { new_key=0; nt_hash(count); } #if defined(_OPENMP) <LOOP-START>for (i = 0; i < count; i++) { unsigned int a; unsigned int b; unsigned int c; unsigned int d; a = last[4*i+0]; b = last[4*i+1]; c = last[4*i+2]; d = last[4*i+3]; a += (d ^ (b & (c ^ d))) + salt_buffer[0] ;a = (a << 3 ) | (a >> 29); d += (c ^ (a & (b ^ c))) + salt_buffer[1] ;d = (d << 7 ) | (d >> 25); c += (b ^ (d & (a ^ b))) + salt_buffer[2] ;c = (c << 11) | (c >> 21); b += (a ^ (c & (d ^ a))) + salt_buffer[3] ;b = (b << 19) | (b >> 13); a += (d ^ (b & (c ^ d))) + salt_buffer[4] ;a = (a << 3 ) | (a >> 29); d += (c ^ (a & (b ^ c))) + salt_buffer[5] ;d = (d << 7 ) | (d >> 25); c += (b ^ (d & (a ^ b))) + salt_buffer[6] ;c = (c << 11) | (c >> 21); b += (a ^ (c & (d ^ a))) + salt_buffer[7] ;b = (b << 19) | (b >> 13); a += (d ^ (b & (c ^ d))) + salt_buffer[8] ;a = (a << 3 ) | (a >> 29); d += (c ^ (a & (b ^ c))) + salt_buffer[9] ;d = (d << 7 ) | (d >> 25); c += (b ^ (d & (a ^ b))) + salt_buffer[10] ;c = (c << 11) | (c >> 21); b += (a ^ (c & (d ^ a)))/*+salt_buffer[11]*/;b = (b << 19) | (b >> 13); /* Round 2 */ a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+0] + SQRT_2; a = (a << 3 ) | (a >> 29); d += ((a & (b | c)) | (b & c)) + salt_buffer[0] + SQRT_2; d = (d << 5 ) | (d >> 27); c += ((d & (a | b)) | (a & b)) + salt_buffer[4] + SQRT_2; c = (c << 9 ) | (c >> 23); b += ((c & (d | a)) | (d & a)) + salt_buffer[8] + SQRT_2; b = (b << 13) | (b >> 19); a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+1] + SQRT_2; a = (a << 3 ) | (a >> 29); d += ((a & (b | c)) | (b & c)) + salt_buffer[1] + SQRT_2; d = (d << 5 ) | (d >> 27); c += ((d & (a | b)) | (a & b)) + salt_buffer[5] + SQRT_2; c = (c << 9 ) | (c >> 23); b += ((c & (d | a)) | (d & a)) + salt_buffer[9] + SQRT_2; b = (b << 13) | (b >> 19); a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+2] + SQRT_2; a = (a << 3 ) | (a >> 29); d += ((a & (b | c)) | (b & c)) + salt_buffer[2] + SQRT_2; d = (d << 5 ) | (d >> 27); c += ((d & (a | b)) | (a & b)) + salt_buffer[6] + SQRT_2; c = (c << 9 ) | (c >> 23); b += ((c & (d | a)) | (d & a)) + salt_buffer[10] + SQRT_2; b = (b << 13) | (b >> 19); a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+3] + SQRT_2; a = (a << 3 ) | (a >> 29); d += ((a & (b | c)) | (b & c)) + salt_buffer[3] + SQRT_2; d = (d << 5 ) | (d >> 27); c += ((d & (a | b)) | (a & b)) + salt_buffer[7] + SQRT_2; c = (c << 9 ) | (c >> 23); b += ((c & (d | a)) | (d & a))/*+ salt_buffer[11]*/+ SQRT_2; b = (b << 13) | (b >> 19); /* Round 3 */ a += (b ^ c ^ d) + crypt_out[4*i+0] + SQRT_3; a = (a << 3 ) | (a >> 29); d += (a ^ b ^ c) + salt_buffer[4] + SQRT_3; d = (d << 9 ) | (d >> 23); c += (d ^ a ^ b) + salt_buffer[0] + SQRT_3; c = (c << 11) | (c >> 21); b += (c ^ d ^ a) + salt_buffer[8] + SQRT_3; b = (b << 15) | (b >> 17); a += (b ^ c ^ d) + crypt_out[4*i+2] + SQRT_3; a = (a << 3 ) | (a >> 29); d += (a ^ b ^ c) + salt_buffer[6] + SQRT_3; d = (d << 9 ) | (d >> 23); c += (d ^ a ^ b) + salt_buffer[2] + SQRT_3; c = (c << 11) | (c >> 21); b += (c ^ d ^ a) + salt_buffer[10] + SQRT_3; b = (b << 15) | (b >> 17); a += (b ^ c ^ d) + crypt_out[4*i+1] + SQRT_3; a = (a << 3 ) | (a >> 29); d += (a ^ b ^ c) + salt_buffer[5]; output1x[4*i+0]=a; output1x[4*i+1]=b; output1x[4*i+2]=c; output1x[4*i+3]=d; }<LOOP-END> <OMP-START>#pragma omp parallel for default(none) private(i) shared(count, last, crypt_out, salt_buffer, output1x)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/has160_fmt_plug.c
#pragma omp parallel for
100
ypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; #ifdef _OPENMP <LOOP-START>for (index = 0; index < count; index++) { has160_ctx ctx; rhash_has160_init(&ctx); rhash_has160_update(&ctx, (unsigned char*)saved_key[index], saved_len[index]); rhash_has160_final(&ctx, (unsigned char*)crypt_out[index]); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mongodb_fmt_plug.c
#pragma omp parallel for
100
pt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; #ifdef _OPENMP <LOOP-START>for (index = 0; index < count; index++) { if (cur_salt->type == 0) { MD5_CTX ctx; MD5_Init(&ctx); MD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username)); MD5_Update(&ctx, ":mongo:", 7); MD5_Update(&ctx, saved_key[index], strlen(saved_key[index])); MD5_Final((unsigned char*)crypt_out[index], &ctx); } else { unsigned char hexout[32]; unsigned char out[32]; MD5_CTX ctx; MD5_Init(&ctx); MD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username)); MD5_Update(&ctx, ":mongo:", 7); MD5_Update(&ctx, saved_key[index], strlen(saved_key[index])); MD5_Final(out, &ctx); hex_encode(out, 16, hexout); MD5_Init(&ctx); MD5_Update(&ctx, cur_salt->salt, 16); MD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username)); MD5_Update(&ctx, hexout, 32); MD5_Final((unsigned char*)crypt_out[index], &ctx); } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/tacacs_plus_fmt_plug.c
#pragma omp parallel for
100
ndex; if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } #ifdef _OPENMP <LOOP-START>for (index = 0; index < count; index++) { if (check_password(index, cur_salt)) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic any_cracked |= 1; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/mysql_netauth_fmt_plug.c
#pragma omp parallel for
100
pt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; #ifdef _OPENMP <LOOP-START>for (index = 0; index < count; index++) { unsigned char stage1_hash[20]; unsigned char inner_hash[20]; unsigned char token[20]; SHA_CTX ctx; int i; unsigned char *p = (unsigned char*)crypt_out[index]; SHA1_Init(&ctx); SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA1_Final(stage1_hash, &ctx); SHA1_Init(&ctx); SHA1_Update(&ctx, stage1_hash, 20); SHA1_Final(inner_hash, &ctx); SHA1_Init(&ctx); SHA1_Update(&ctx, cur_salt->scramble, 20); SHA1_Update(&ctx, inner_hash, 20); SHA1_Final(token, &ctx); for (i = 0; i < 20; i++) { p[i] = token[i] ^ stage1_hash[i]; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/scrypt_fmt.c
#pragma omp parallel for default(none) private(index) shared(count, failed, max_threads, local, saved_salt, buffer)
100
*pcount, struct db_salt *salt) { int count = *pcount; int index; int failed = 0; #ifdef _OPENMP <LOOP-START>for (index = 0; index < count; index++) { #ifdef _OPENMP int t = omp_get_thread_num(); if (t >= max_threads) { failed = -1; continue; } #else const int t = 0; uint8_t *hash; hash = yescrypt_r(NULL, &local[t], (const uint8_t *)buffer[index].key, strlen(buffer[index].key), (const uint8_t *)saved_salt, NULL, (uint8_t *)buffer[index].out, sizeof(buffer[index].out)); if (!hash) { failed = errno ? errno : EINVAL; #ifndef _OPENMP break; } }<LOOP-END> <OMP-START>#pragma omp parallel for default(none) private(index) shared(count, failed, max_threads, local, saved_salt, buffer)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/XSHA512_fmt_plug.c
#pragma omp parallel for default(none) private(index) shared(count, ctx_salt, saved_key, saved_len, crypt_out)
100
nt count = *pcount; int index; #ifdef _OPENMP #ifndef SIMD_COEF_64 #ifdef PRECOMPUTE_CTX_FOR_SALT <LOOP-START>#else #pragma omp parallel for default(none) private(index) shared(count, saved_salt, saved_key, saved_len, crypt_out) #else #pragma omp parallel for for (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) { #ifdef SIMD_COEF_64 SIMDSHA512body(&saved_key[index/MIN_KEYS_PER_CRYPT], &crypt_out[HASH_IDX], NULL, SSEi_MIXED_IN); #else SHA512_CTX ctx; #ifdef PRECOMPUTE_CTX_FOR_SALT memcpy(&ctx, &ctx_salt, sizeof(ctx)); #else SHA512_Init(&ctx); SHA512_Update(&ctx, &saved_salt, SALT_SIZE); SHA512_Update(&ctx, saved_key[index], saved_len[index]); SHA512_Final((unsigned char *)(crypt_out[index]), &ctx); }<LOOP-END> <OMP-START>#pragma omp parallel for default(none) private(index) shared(count, ctx_salt, saved_key, saved_len, crypt_out)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/XSHA512_fmt_plug.c
#pragma omp parallel for default(none) private(index) shared(count, saved_salt, saved_key, saved_len, crypt_out)
100
lel for default(none) private(index) shared(count, ctx_salt, saved_key, saved_len, crypt_out) #else <LOOP-START>#else #pragma omp parallel for for (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) { #ifdef SIMD_COEF_64 SIMDSHA512body(&saved_key[index/MIN_KEYS_PER_CRYPT], &crypt_out[HASH_IDX], NULL, SSEi_MIXED_IN); #else SHA512_CTX ctx; #ifdef PRECOMPUTE_CTX_FOR_SALT memcpy(&ctx, &ctx_salt, sizeof(ctx)); #else SHA512_Init(&ctx); SHA512_Update(&ctx, &saved_salt, SALT_SIZE); SHA512_Update(&ctx, saved_key[index], saved_len[index]); SHA512_Final((unsigned char *)(crypt_out[index]), &ctx); }<LOOP-END> <OMP-START>#pragma omp parallel for default(none) private(index) shared(count, saved_salt, saved_key, saved_len, crypt_out)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/XSHA512_fmt_plug.c
#pragma omp parallel for
100
efault(none) private(index) shared(count, saved_salt, saved_key, saved_len, crypt_out) #endif #else <LOOP-START>for (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) { #ifdef SIMD_COEF_64 SIMDSHA512body(&saved_key[index/MIN_KEYS_PER_CRYPT], &crypt_out[HASH_IDX], NULL, SSEi_MIXED_IN); #else SHA512_CTX ctx; #ifdef PRECOMPUTE_CTX_FOR_SALT memcpy(&ctx, &ctx_salt, sizeof(ctx)); #else SHA512_Init(&ctx); SHA512_Update(&ctx, &saved_salt, SALT_SIZE); SHA512_Update(&ctx, saved_key[index], saved_len[index]); SHA512_Final((unsigned char *)(crypt_out[index]), &ctx); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/pbkdf2_hmac_sha256_fmt_plug.c
#pragma omp parallel for
100
pt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; #ifdef _OPENMP <LOOP-START>for (index = 0; index < count; index += MIN_KEYS_PER_CRYPT) { #ifdef SSE_GROUP_SZ_SHA256 int lens[SSE_GROUP_SZ_SHA256], i; unsigned char *pin[SSE_GROUP_SZ_SHA256]; union { uint32_t *pout[SSE_GROUP_SZ_SHA256]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = crypt_out[index+i]; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->length, cur_salt->rounds, &(x.poutc), PBKDF2_SHA256_BINARY_SIZE, 0); #else pbkdf2_sha256((const unsigned char*)(saved_key[index]), strlen(saved_key[index]), cur_salt->salt, cur_salt->length, cur_salt->rounds, (unsigned char*)crypt_out[index], PBKDF2_SHA256_BINARY_SIZE, 0); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/sm3_fmt_plug.c
#pragma omp parallel for
100
t crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index; #ifdef _OPENMP <LOOP-START>for (index = 0; index < count; index++) { sm3_ctx ctx; sm3_init(&ctx); sm3_update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index])); sm3_final(&ctx, (unsigned char *)crypt_out[index]); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/yescrypt/yescrypt-opt.c
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, VROM, XY, S)
100
smix(B, r, N, p, t, flags, V, NROM, VROM, XY, S, sha256); } else { uint32_t i; #ifdef _OPENMP <LOOP-START>for (i = 0; i < p; i++) { #ifdef _OPENMP smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, &V[(size_t)2 * r * i * N], NROM, VROM, &XY[(size_t)4 * r * i], NULL, NULL); #else smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V, NROM, VROM, XY, NULL, NULL); }<LOOP-END> <OMP-START>#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, VROM, XY, S)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/john/src/yescrypt/userom.c
#pragma omp parallel for default(none) private(i) shared(n, shared, thread_data, setting, seed, count, save, nsave, key)
100
); } } start = times(&start_tms); n = count * omp_get_max_threads(); count = 0; do { <LOOP-START>for (i = 0; i < n; i++) { unsigned int j = count + i; char p[32]; uint8_t hash[128]; snprintf(p, sizeof(p), "%u", seed + j); thread_data_s *td = &thread_data[omp_get_thread_num()].s; uint64_t start1 = time_us(); #if 1 const char *h = (const char *)yescrypt_r( shared, &td->local, (const uint8_t *)p, strlen(p), setting, &key, hash, sizeof(hash)); #else yescrypt_local_t local; yescrypt_init_local(&local); const char *h = (const char *)yescrypt_r( shared, &local, (const uint8_t *)p, strlen(p), setting, &key, hash, sizeof(hash)); yescrypt_free_local(&local); uint64_t end1 = time_us(); if (end1 < start1) end1 = start1; uint64_t diff1 = end1 - start1; td->total += diff1; if (diff1 < td->min) td->min = diff1; if (diff1 > td->max) td->max = diff1; if (j < nsave && strcmp(save[j], h)) { #pragma omp critical printf("Mismatch at %u, %s != %s\n", j, save[j], h); } }<LOOP-END> <OMP-START>#pragma omp parallel for default(none) private(i) shared(n, shared, thread_data, setting, seed, count, save, nsave, key)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/php_mt_seed/php_mt_seed.c
#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30, vvalue)
100
; seed_shr_30 = seed >> 30; #endif } #ifdef _OPENMP #if defined(__SSE4_1__) || defined(__MIC__) <LOOP-START>#elif defined(__SSE2__) #pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30) #else #pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30) for (base = start; base < end; base++) { uint32_t seed = (uint32_t)base << P; #if defined(__SSE2__) || defined(__MIC__) typedef struct { vtype a, b, c, d, e, f, g, h; } atype; atype xM, x = {}, x710 = {}; /* Hint to compiler not to waste registers */ volatile atype x1; const vtype cone = _mm_set1_epi32(1); vtype vseed = _mm_set1_epi32(seed); version_t version; #define DO(which, add) \ xM.which = _mm_add_epi32(xM.a, _mm_set1_epi32(add)); #if defined(__MIC__) || defined(__AVX512F__) xM.a = _mm512_add_epi32(vseed, _mm512_set_epi32( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30)); DO(b, 1) DO(c, 32) DO(d, 33) DO(e, 64) DO(f, 65) DO(g, 96) DO(h, 97) #elif defined(__AVX2__) xM.a = _mm256_add_epi32(vseed, _mm256_set_epi32( 0, 2, 4, 6, 8, 10, 12, 14)); DO(b, 1) DO(c, 16) DO(d, 17) DO(e, 32) DO(f, 33) DO(g, 48) DO(h, 49) #else xM.a = _mm_add_epi32(vseed, _mm_set_epi32(0, 2, 4, 6)); DO(b, 1) DO(c, 8) DO(d, 9) DO(e, 16) DO(f, 17) DO(g, 24) DO(h, 25) #undef DO #define DO_ALL \ DO(x.a, x1.a, xM.a) \ DO(x.b, x1.b, xM.b) \ DO(x.c, x1.c, xM.c) \ DO(x.d, x1.d, xM.d) \ DO(x.e, x1.e, xM.e) \ DO(x.f, x1.f, xM.f) \ DO(x.g, x1.g, xM.g) \ DO(x.h, x1.h, xM.h) if (flavor == PHP_LEGACY) { const vtype c69069 = _mm_set1_epi32(69069); const vtype c69069to396 = _mm_set1_epi32(0x4396a0b1); #define DO(x, x1, xM) \ xM = _mm_add_epi32(_mm_add_epi32(xM, xM), cone); \ x1 = xM = _mm_mullo_epi32(c69069, xM); \ xM = _mm_mullo_epi32(c69069to396, xM); DO_ALL #undef DO } else { const vtype cmul = _mm_set1_epi32(1812433253U); vtype vi = _mm_add_epi32(cone, cone); unsigned int n = (M - 1) / 22; #define DO(x, x1, xM) \ x1 = xM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, seed_shr_30), cone); DO_ALL #undef DO do { #define DO(x, x1, xM) \ xM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, _mm_srli_epi32(xM, 30)), vi); #define DO_ALLI \ DO_ALL \ vi = _mm_add_epi32(vi, cone); DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI #undef DO_ALLI #undef DO } while (--n); } version = flavor; if (!(match->flags & MATCH_SKIP)) { const vtype c0x7fffffff = _mm_set1_epi32(0x7fffffff); const vtype c0x9908b0df = _mm_set1_epi32(0x9908b0df); #define DO(x, x1, xM) \ x = _mm_xor_si128(xM, _mm_srli_epi32(_mm_or_si128(seed_and_0x80000000, \ _mm_and_si128(x1, c0x7fffffff)), 1)); DO_ALL #undef DO #define DO(xout, xin, x1) \ xout = _mm_xor_si128(xin, _mm_mullo_epi32(c0x9908b0df, \ _mm_and_si128(x1, cone))); DO(x710.a, x.a, x1.a) DO(x710.b, x.b, x1.b) DO(x710.c, x.c, x1.c) DO(x710.d, x.d, x1.d) DO(x710.e, x.e, x1.e) DO(x710.f, x.f, x1.f) DO(x710.g, x.g, x1.g) DO(x710.h, x.h, x1.h) #undef DO if (version == PHP_521) { #define DO(x) \ x = _mm_xor_si128(x, c0x9908b0df); DO(x.b) DO(x.d) DO(x.f) DO(x.h) #undef DO } else x = x710; } do { uint32_t maybe = 1; if (!(match->flags & MATCH_SKIP)) { const vtype c0x9d2c5680 = _mm_set1_epi32(0x9d2c5680); const vtype c0xefc60000 = _mm_set1_epi32(0xefc60000); #define DO(x, x1, xM) \ x = _mm_xor_si128(x, _mm_srli_epi32(x, 11)); DO_ALL #undef DO #define DO_SC(x, s, c) \ x = _mm_xor_si128(x, 
_mm_and_si128(_mm_slli_epi32(x, s), c)); #define DO(x, x1, xM) \ DO_SC(x, 7, c0x9d2c5680) \ DO_SC(x, 15, c0xefc60000) DO_ALL #undef DO #undef DO_SC #define DO(x, x1, xM) \ x = _mm_xor_si128(x, _mm_srli_epi32(x, 18)); DO_ALL #undef DO if (match->flags & MATCH_FULL) { #define DO(x, x1, xM) \ x = _mm_srli_epi32(x, 1); DO_ALL #undef DO } } #if defined(__SSE4_1__) || defined(__MIC__) if (match->flags & MATCH_PURE) { #if defined(__MIC__) || defined(__AVX512F__) maybe = _mm512_cmpeq_epi32_mask(x.a, vvalue) | _mm512_cmpeq_epi32_mask(x.b, vvalue) | _mm512_cmpeq_epi32_mask(x.c, vvalue) | _mm512_cmpeq_epi32_mask(x.d, vvalue) | _mm512_cmpeq_epi32_mask(x.e, vvalue) | _mm512_cmpeq_epi32_mask(x.f, vvalue) | _mm512_cmpeq_epi32_mask(x.g, vvalue) | _mm512_cmpeq_epi32_mask(x.h, vvalue); #else vtype amask = _mm_cmpeq_epi32(x.a, vvalue); vtype bmask = _mm_cmpeq_epi32(x.b, vvalue); vtype cmask = _mm_cmpeq_epi32(x.c, vvalue); vtype dmask = _mm_cmpeq_epi32(x.d, vvalue); vtype emask = _mm_cmpeq_epi32(x.e, vvalue); vtype fmask = _mm_cmpeq_epi32(x.f, vvalue); vtype gmask = _mm_cmpeq_epi32(x.g, vvalue); vtype hmask = _mm_cmpeq_epi32(x.h, vvalue); maybe = !(_mm_testz_si128(amask, amask) && _mm_testz_si128(bmask, bmask) && _mm_testz_si128(cmask, cmask) && _mm_testz_si128(dmask, dmask) && _mm_testz_si128(emask, emask) && _mm_testz_si128(fmask, fmask) && _mm_testz_si128(gmask, gmask) && _mm_testz_si128(hmask, hmask)); } if (maybe) { unsigned int i; uint32_t iseed; typedef union { atype v; uint32_t s[8][sizeof(vtype) / 4]; } utype; utype u; /* Hint to compiler not to waste registers */ volatile utype uM; u.v = x; uM.v = xM; #if defined(__MIC__) || defined(__AVX512F__) for (i = 0, iseed = seed; i < 8; i++, iseed += 32) { unsigned int j, k; for (j = 0, k = 30; j < 16; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } i++; for (j = 0, k = 31; j < 16; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } } #elif defined(__AVX2__) for (i = 0, iseed = seed; i < 8; i++, iseed += 16) { unsigned int j, k; for (j = 0, k = 14; j < 8; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } i++; for (j = 0, k = 15; j < 8; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } } #else for (i = 0, iseed = seed; i < 8; i++, iseed += 8) { COMPARE(u.s[i][0], uM.s[i][0], iseed + 6) COMPARE(u.s[i][1], uM.s[i][1], iseed + 4) COMPARE(u.s[i][2], uM.s[i][2], iseed + 2) COMPARE(u.s[i][3], uM.s[i][3], iseed) i++; COMPARE(u.s[i][0], uM.s[i][0], iseed + 7) COMPARE(u.s[i][1], uM.s[i][1], iseed + 5) COMPARE(u.s[i][2], uM.s[i][2], iseed + 3) COMPARE(u.s[i][3], uM.s[i][3], iseed + 1) } /* Hint to compiler not to spill xM above */ xM = uM.v; } if (version != PHP_521) break; version = PHP_710; x = x710; } while (1); #else typedef struct { uint32_t a, b, c, d; } atype; atype x = {}, x710 = {}; do { atype x1, xM; version_t version; unsigned int i; xM.a = seed; xM.b = seed + 1; xM.c = seed + 2; xM.d = seed + 3; #define DO_ALL \ DO(x.a, x1.a, xM.a) \ DO(x.b, x1.b, xM.b) \ DO(x.c, x1.c, xM.c) \ DO(x.d, x1.d, xM.d) if (flavor == PHP_LEGACY) { #define DO(x, x1, xM) \ xM += xM + 1; \ x1 = xM *= 69069; \ xM *= 0x4396a0b1; DO_ALL #undef DO } else { #define DO(x, x1, xM) \ x1 = xM = 1812433253U * (xM ^ seed_shr_30) + 1; DO_ALL #undef DO for (i = 2; i <= M; i++) { #define DO(x, x1, xM) \ NEXT_STATE(xM, i) DO_ALL #undef DO } } version = flavor; if (!(match->flags & MATCH_SKIP)) { #define DO(x, x1, xM) \ x = ((seed_and_0x80000000 | (x1 & 0x7fffffff)) >> 1) ^ xM; DO_ALL #undef DO #define DO(xout, xin, x1) \ xout = xin ^ ((x1 & 1) * 0x9908b0df); 
DO(x710.a, x.a, x1.a) DO(x710.b, x.b, x1.b) DO(x710.c, x.c, x1.c) DO(x710.d, x.d, x1.d) #undef DO if (version == PHP_521) { x.b ^= 0x9908b0df; x.d ^= 0x9908b0df; } else x = x710; } do { if (!(match->flags & MATCH_SKIP)) { #define DO(x, x1, xM) \ x ^= x >> 11; \ x ^= (x << 7) & 0x9d2c5680; \ x ^= (x << 15) & 0xefc60000; \ x ^= x >> 18; DO_ALL #undef DO if (match->flags & MATCH_FULL) { #define DO(x, x1, xM) \ x >>= 1; DO_ALL #undef DO } } COMPARE(x.a, x1.a, xM.a, seed) COMPARE(x.b, x1.b, xM.b, seed + 1) COMPARE(x.c, x1.c, xM.c, seed + 2) COMPARE(x.d, x1.d, xM.d, seed + 3) if (version != PHP_521) break; version = PHP_710; x = x710; } while (1); seed += 4; } while (seed & ((1 << P) - 1)); }<LOOP-END> <OMP-START>#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30, vvalue)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/php_mt_seed/php_mt_seed.c
#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)
100
match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30, vvalue) #elif defined(__SSE2__) <LOOP-START>#else #pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30) for (base = start; base < end; base++) { uint32_t seed = (uint32_t)base << P; #if defined(__SSE2__) || defined(__MIC__) typedef struct { vtype a, b, c, d, e, f, g, h; } atype; atype xM, x = {}, x710 = {}; /* Hint to compiler not to waste registers */ volatile atype x1; const vtype cone = _mm_set1_epi32(1); vtype vseed = _mm_set1_epi32(seed); version_t version; #define DO(which, add) \ xM.which = _mm_add_epi32(xM.a, _mm_set1_epi32(add)); #if defined(__MIC__) || defined(__AVX512F__) xM.a = _mm512_add_epi32(vseed, _mm512_set_epi32( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30)); DO(b, 1) DO(c, 32) DO(d, 33) DO(e, 64) DO(f, 65) DO(g, 96) DO(h, 97) #elif defined(__AVX2__) xM.a = _mm256_add_epi32(vseed, _mm256_set_epi32( 0, 2, 4, 6, 8, 10, 12, 14)); DO(b, 1) DO(c, 16) DO(d, 17) DO(e, 32) DO(f, 33) DO(g, 48) DO(h, 49) #else xM.a = _mm_add_epi32(vseed, _mm_set_epi32(0, 2, 4, 6)); DO(b, 1) DO(c, 8) DO(d, 9) DO(e, 16) DO(f, 17) DO(g, 24) DO(h, 25) #undef DO #define DO_ALL \ DO(x.a, x1.a, xM.a) \ DO(x.b, x1.b, xM.b) \ DO(x.c, x1.c, xM.c) \ DO(x.d, x1.d, xM.d) \ DO(x.e, x1.e, xM.e) \ DO(x.f, x1.f, xM.f) \ DO(x.g, x1.g, xM.g) \ DO(x.h, x1.h, xM.h) if (flavor == PHP_LEGACY) { const vtype c69069 = _mm_set1_epi32(69069); const vtype c69069to396 = _mm_set1_epi32(0x4396a0b1); #define DO(x, x1, xM) \ xM = _mm_add_epi32(_mm_add_epi32(xM, xM), cone); \ x1 = xM = _mm_mullo_epi32(c69069, xM); \ xM = _mm_mullo_epi32(c69069to396, xM); DO_ALL #undef DO } else { const vtype cmul = _mm_set1_epi32(1812433253U); vtype vi = _mm_add_epi32(cone, cone); unsigned int n = (M - 1) / 22; #define DO(x, x1, xM) \ x1 = xM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, seed_shr_30), cone); DO_ALL #undef DO do { #define DO(x, x1, xM) \ xM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, _mm_srli_epi32(xM, 30)), vi); #define DO_ALLI \ DO_ALL \ vi = _mm_add_epi32(vi, cone); DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI #undef DO_ALLI #undef DO } while (--n); } version = flavor; if (!(match->flags & MATCH_SKIP)) { const vtype c0x7fffffff = _mm_set1_epi32(0x7fffffff); const vtype c0x9908b0df = _mm_set1_epi32(0x9908b0df); #define DO(x, x1, xM) \ x = _mm_xor_si128(xM, _mm_srli_epi32(_mm_or_si128(seed_and_0x80000000, \ _mm_and_si128(x1, c0x7fffffff)), 1)); DO_ALL #undef DO #define DO(xout, xin, x1) \ xout = _mm_xor_si128(xin, _mm_mullo_epi32(c0x9908b0df, \ _mm_and_si128(x1, cone))); DO(x710.a, x.a, x1.a) DO(x710.b, x.b, x1.b) DO(x710.c, x.c, x1.c) DO(x710.d, x.d, x1.d) DO(x710.e, x.e, x1.e) DO(x710.f, x.f, x1.f) DO(x710.g, x.g, x1.g) DO(x710.h, x.h, x1.h) #undef DO if (version == PHP_521) { #define DO(x) \ x = _mm_xor_si128(x, c0x9908b0df); DO(x.b) DO(x.d) DO(x.f) DO(x.h) #undef DO } else x = x710; } do { uint32_t maybe = 1; if (!(match->flags & MATCH_SKIP)) { const vtype c0x9d2c5680 = _mm_set1_epi32(0x9d2c5680); const vtype c0xefc60000 = _mm_set1_epi32(0xefc60000); #define DO(x, x1, xM) \ x = _mm_xor_si128(x, _mm_srli_epi32(x, 11)); DO_ALL #undef DO #define DO_SC(x, s, c) \ x = _mm_xor_si128(x, _mm_and_si128(_mm_slli_epi32(x, s), c)); #define DO(x, x1, xM) \ DO_SC(x, 7, c0x9d2c5680) \ DO_SC(x, 15, c0xefc60000) DO_ALL #undef DO #undef DO_SC #define DO(x, x1, xM) \ x = 
_mm_xor_si128(x, _mm_srli_epi32(x, 18)); DO_ALL #undef DO if (match->flags & MATCH_FULL) { #define DO(x, x1, xM) \ x = _mm_srli_epi32(x, 1); DO_ALL #undef DO } } #if defined(__SSE4_1__) || defined(__MIC__) if (match->flags & MATCH_PURE) { #if defined(__MIC__) || defined(__AVX512F__) maybe = _mm512_cmpeq_epi32_mask(x.a, vvalue) | _mm512_cmpeq_epi32_mask(x.b, vvalue) | _mm512_cmpeq_epi32_mask(x.c, vvalue) | _mm512_cmpeq_epi32_mask(x.d, vvalue) | _mm512_cmpeq_epi32_mask(x.e, vvalue) | _mm512_cmpeq_epi32_mask(x.f, vvalue) | _mm512_cmpeq_epi32_mask(x.g, vvalue) | _mm512_cmpeq_epi32_mask(x.h, vvalue); #else vtype amask = _mm_cmpeq_epi32(x.a, vvalue); vtype bmask = _mm_cmpeq_epi32(x.b, vvalue); vtype cmask = _mm_cmpeq_epi32(x.c, vvalue); vtype dmask = _mm_cmpeq_epi32(x.d, vvalue); vtype emask = _mm_cmpeq_epi32(x.e, vvalue); vtype fmask = _mm_cmpeq_epi32(x.f, vvalue); vtype gmask = _mm_cmpeq_epi32(x.g, vvalue); vtype hmask = _mm_cmpeq_epi32(x.h, vvalue); maybe = !(_mm_testz_si128(amask, amask) && _mm_testz_si128(bmask, bmask) && _mm_testz_si128(cmask, cmask) && _mm_testz_si128(dmask, dmask) && _mm_testz_si128(emask, emask) && _mm_testz_si128(fmask, fmask) && _mm_testz_si128(gmask, gmask) && _mm_testz_si128(hmask, hmask)); } if (maybe) { unsigned int i; uint32_t iseed; typedef union { atype v; uint32_t s[8][sizeof(vtype) / 4]; } utype; utype u; /* Hint to compiler not to waste registers */ volatile utype uM; u.v = x; uM.v = xM; #if defined(__MIC__) || defined(__AVX512F__) for (i = 0, iseed = seed; i < 8; i++, iseed += 32) { unsigned int j, k; for (j = 0, k = 30; j < 16; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } i++; for (j = 0, k = 31; j < 16; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } } #elif defined(__AVX2__) for (i = 0, iseed = seed; i < 8; i++, iseed += 16) { unsigned int j, k; for (j = 0, k = 14; j < 8; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } i++; for (j = 0, k = 15; j < 8; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } } #else for (i = 0, iseed = seed; i < 8; i++, iseed += 8) { COMPARE(u.s[i][0], uM.s[i][0], iseed + 6) COMPARE(u.s[i][1], uM.s[i][1], iseed + 4) COMPARE(u.s[i][2], uM.s[i][2], iseed + 2) COMPARE(u.s[i][3], uM.s[i][3], iseed) i++; COMPARE(u.s[i][0], uM.s[i][0], iseed + 7) COMPARE(u.s[i][1], uM.s[i][1], iseed + 5) COMPARE(u.s[i][2], uM.s[i][2], iseed + 3) COMPARE(u.s[i][3], uM.s[i][3], iseed + 1) } /* Hint to compiler not to spill xM above */ xM = uM.v; } if (version != PHP_521) break; version = PHP_710; x = x710; } while (1); #else typedef struct { uint32_t a, b, c, d; } atype; atype x = {}, x710 = {}; do { atype x1, xM; version_t version; unsigned int i; xM.a = seed; xM.b = seed + 1; xM.c = seed + 2; xM.d = seed + 3; #define DO_ALL \ DO(x.a, x1.a, xM.a) \ DO(x.b, x1.b, xM.b) \ DO(x.c, x1.c, xM.c) \ DO(x.d, x1.d, xM.d) if (flavor == PHP_LEGACY) { #define DO(x, x1, xM) \ xM += xM + 1; \ x1 = xM *= 69069; \ xM *= 0x4396a0b1; DO_ALL #undef DO } else { #define DO(x, x1, xM) \ x1 = xM = 1812433253U * (xM ^ seed_shr_30) + 1; DO_ALL #undef DO for (i = 2; i <= M; i++) { #define DO(x, x1, xM) \ NEXT_STATE(xM, i) DO_ALL #undef DO } } version = flavor; if (!(match->flags & MATCH_SKIP)) { #define DO(x, x1, xM) \ x = ((seed_and_0x80000000 | (x1 & 0x7fffffff)) >> 1) ^ xM; DO_ALL #undef DO #define DO(xout, xin, x1) \ xout = xin ^ ((x1 & 1) * 0x9908b0df); DO(x710.a, x.a, x1.a) DO(x710.b, x.b, x1.b) DO(x710.c, x.c, x1.c) DO(x710.d, x.d, x1.d) #undef DO if (version == PHP_521) { x.b ^= 0x9908b0df; x.d ^= 0x9908b0df; } else x = x710; } 
do { if (!(match->flags & MATCH_SKIP)) { #define DO(x, x1, xM) \ x ^= x >> 11; \ x ^= (x << 7) & 0x9d2c5680; \ x ^= (x << 15) & 0xefc60000; \ x ^= x >> 18; DO_ALL #undef DO if (match->flags & MATCH_FULL) { #define DO(x, x1, xM) \ x >>= 1; DO_ALL #undef DO } } COMPARE(x.a, x1.a, xM.a, seed) COMPARE(x.b, x1.b, xM.b, seed + 1) COMPARE(x.c, x1.c, xM.c, seed + 2) COMPARE(x.d, x1.d, xM.d, seed + 3) if (version != PHP_521) break; version = PHP_710; x = x710; } while (1); seed += 4; } while (seed & ((1 << P) - 1)); }<LOOP-END> <OMP-START>#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/openwall/php_mt_seed/php_mt_seed.c
#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)
100
one) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30) #else <LOOP-START>for (base = start; base < end; base++) { uint32_t seed = (uint32_t)base << P; #if defined(__SSE2__) || defined(__MIC__) typedef struct { vtype a, b, c, d, e, f, g, h; } atype; atype xM, x = {}, x710 = {}; /* Hint to compiler not to waste registers */ volatile atype x1; const vtype cone = _mm_set1_epi32(1); vtype vseed = _mm_set1_epi32(seed); version_t version; #define DO(which, add) \ xM.which = _mm_add_epi32(xM.a, _mm_set1_epi32(add)); #if defined(__MIC__) || defined(__AVX512F__) xM.a = _mm512_add_epi32(vseed, _mm512_set_epi32( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30)); DO(b, 1) DO(c, 32) DO(d, 33) DO(e, 64) DO(f, 65) DO(g, 96) DO(h, 97) #elif defined(__AVX2__) xM.a = _mm256_add_epi32(vseed, _mm256_set_epi32( 0, 2, 4, 6, 8, 10, 12, 14)); DO(b, 1) DO(c, 16) DO(d, 17) DO(e, 32) DO(f, 33) DO(g, 48) DO(h, 49) #else xM.a = _mm_add_epi32(vseed, _mm_set_epi32(0, 2, 4, 6)); DO(b, 1) DO(c, 8) DO(d, 9) DO(e, 16) DO(f, 17) DO(g, 24) DO(h, 25) #undef DO #define DO_ALL \ DO(x.a, x1.a, xM.a) \ DO(x.b, x1.b, xM.b) \ DO(x.c, x1.c, xM.c) \ DO(x.d, x1.d, xM.d) \ DO(x.e, x1.e, xM.e) \ DO(x.f, x1.f, xM.f) \ DO(x.g, x1.g, xM.g) \ DO(x.h, x1.h, xM.h) if (flavor == PHP_LEGACY) { const vtype c69069 = _mm_set1_epi32(69069); const vtype c69069to396 = _mm_set1_epi32(0x4396a0b1); #define DO(x, x1, xM) \ xM = _mm_add_epi32(_mm_add_epi32(xM, xM), cone); \ x1 = xM = _mm_mullo_epi32(c69069, xM); \ xM = _mm_mullo_epi32(c69069to396, xM); DO_ALL #undef DO } else { const vtype cmul = _mm_set1_epi32(1812433253U); vtype vi = _mm_add_epi32(cone, cone); unsigned int n = (M - 1) / 22; #define DO(x, x1, xM) \ x1 = xM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, seed_shr_30), cone); DO_ALL #undef DO do { #define DO(x, x1, xM) \ xM = _mm_macc_epi32(cmul, _mm_xor_si128(xM, _mm_srli_epi32(xM, 30)), vi); #define DO_ALLI \ DO_ALL \ vi = _mm_add_epi32(vi, cone); DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI DO_ALLI #undef DO_ALLI #undef DO } while (--n); } version = flavor; if (!(match->flags & MATCH_SKIP)) { const vtype c0x7fffffff = _mm_set1_epi32(0x7fffffff); const vtype c0x9908b0df = _mm_set1_epi32(0x9908b0df); #define DO(x, x1, xM) \ x = _mm_xor_si128(xM, _mm_srli_epi32(_mm_or_si128(seed_and_0x80000000, \ _mm_and_si128(x1, c0x7fffffff)), 1)); DO_ALL #undef DO #define DO(xout, xin, x1) \ xout = _mm_xor_si128(xin, _mm_mullo_epi32(c0x9908b0df, \ _mm_and_si128(x1, cone))); DO(x710.a, x.a, x1.a) DO(x710.b, x.b, x1.b) DO(x710.c, x.c, x1.c) DO(x710.d, x.d, x1.d) DO(x710.e, x.e, x1.e) DO(x710.f, x.f, x1.f) DO(x710.g, x.g, x1.g) DO(x710.h, x.h, x1.h) #undef DO if (version == PHP_521) { #define DO(x) \ x = _mm_xor_si128(x, c0x9908b0df); DO(x.b) DO(x.d) DO(x.f) DO(x.h) #undef DO } else x = x710; } do { uint32_t maybe = 1; if (!(match->flags & MATCH_SKIP)) { const vtype c0x9d2c5680 = _mm_set1_epi32(0x9d2c5680); const vtype c0xefc60000 = _mm_set1_epi32(0xefc60000); #define DO(x, x1, xM) \ x = _mm_xor_si128(x, _mm_srli_epi32(x, 11)); DO_ALL #undef DO #define DO_SC(x, s, c) \ x = _mm_xor_si128(x, _mm_and_si128(_mm_slli_epi32(x, s), c)); #define DO(x, x1, xM) \ DO_SC(x, 7, c0x9d2c5680) \ DO_SC(x, 15, c0xefc60000) DO_ALL #undef DO #undef DO_SC #define DO(x, x1, xM) \ x = _mm_xor_si128(x, _mm_srli_epi32(x, 18)); DO_ALL #undef DO if (match->flags & MATCH_FULL) { #define DO(x, x1, xM) \ x = _mm_srli_epi32(x, 
1); DO_ALL #undef DO } } #if defined(__SSE4_1__) || defined(__MIC__) if (match->flags & MATCH_PURE) { #if defined(__MIC__) || defined(__AVX512F__) maybe = _mm512_cmpeq_epi32_mask(x.a, vvalue) | _mm512_cmpeq_epi32_mask(x.b, vvalue) | _mm512_cmpeq_epi32_mask(x.c, vvalue) | _mm512_cmpeq_epi32_mask(x.d, vvalue) | _mm512_cmpeq_epi32_mask(x.e, vvalue) | _mm512_cmpeq_epi32_mask(x.f, vvalue) | _mm512_cmpeq_epi32_mask(x.g, vvalue) | _mm512_cmpeq_epi32_mask(x.h, vvalue); #else vtype amask = _mm_cmpeq_epi32(x.a, vvalue); vtype bmask = _mm_cmpeq_epi32(x.b, vvalue); vtype cmask = _mm_cmpeq_epi32(x.c, vvalue); vtype dmask = _mm_cmpeq_epi32(x.d, vvalue); vtype emask = _mm_cmpeq_epi32(x.e, vvalue); vtype fmask = _mm_cmpeq_epi32(x.f, vvalue); vtype gmask = _mm_cmpeq_epi32(x.g, vvalue); vtype hmask = _mm_cmpeq_epi32(x.h, vvalue); maybe = !(_mm_testz_si128(amask, amask) && _mm_testz_si128(bmask, bmask) && _mm_testz_si128(cmask, cmask) && _mm_testz_si128(dmask, dmask) && _mm_testz_si128(emask, emask) && _mm_testz_si128(fmask, fmask) && _mm_testz_si128(gmask, gmask) && _mm_testz_si128(hmask, hmask)); } if (maybe) { unsigned int i; uint32_t iseed; typedef union { atype v; uint32_t s[8][sizeof(vtype) / 4]; } utype; utype u; /* Hint to compiler not to waste registers */ volatile utype uM; u.v = x; uM.v = xM; #if defined(__MIC__) || defined(__AVX512F__) for (i = 0, iseed = seed; i < 8; i++, iseed += 32) { unsigned int j, k; for (j = 0, k = 30; j < 16; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } i++; for (j = 0, k = 31; j < 16; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } } #elif defined(__AVX2__) for (i = 0, iseed = seed; i < 8; i++, iseed += 16) { unsigned int j, k; for (j = 0, k = 14; j < 8; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } i++; for (j = 0, k = 15; j < 8; j++, k -= 2) { COMPARE(u.s[i][j], uM.s[i][j], iseed + k) } } #else for (i = 0, iseed = seed; i < 8; i++, iseed += 8) { COMPARE(u.s[i][0], uM.s[i][0], iseed + 6) COMPARE(u.s[i][1], uM.s[i][1], iseed + 4) COMPARE(u.s[i][2], uM.s[i][2], iseed + 2) COMPARE(u.s[i][3], uM.s[i][3], iseed) i++; COMPARE(u.s[i][0], uM.s[i][0], iseed + 7) COMPARE(u.s[i][1], uM.s[i][1], iseed + 5) COMPARE(u.s[i][2], uM.s[i][2], iseed + 3) COMPARE(u.s[i][3], uM.s[i][3], iseed + 1) } /* Hint to compiler not to spill xM above */ xM = uM.v; } if (version != PHP_521) break; version = PHP_710; x = x710; } while (1); #else typedef struct { uint32_t a, b, c, d; } atype; atype x = {}, x710 = {}; do { atype x1, xM; version_t version; unsigned int i; xM.a = seed; xM.b = seed + 1; xM.c = seed + 2; xM.d = seed + 3; #define DO_ALL \ DO(x.a, x1.a, xM.a) \ DO(x.b, x1.b, xM.b) \ DO(x.c, x1.c, xM.c) \ DO(x.d, x1.d, xM.d) if (flavor == PHP_LEGACY) { #define DO(x, x1, xM) \ xM += xM + 1; \ x1 = xM *= 69069; \ xM *= 0x4396a0b1; DO_ALL #undef DO } else { #define DO(x, x1, xM) \ x1 = xM = 1812433253U * (xM ^ seed_shr_30) + 1; DO_ALL #undef DO for (i = 2; i <= M; i++) { #define DO(x, x1, xM) \ NEXT_STATE(xM, i) DO_ALL #undef DO } } version = flavor; if (!(match->flags & MATCH_SKIP)) { #define DO(x, x1, xM) \ x = ((seed_and_0x80000000 | (x1 & 0x7fffffff)) >> 1) ^ xM; DO_ALL #undef DO #define DO(xout, xin, x1) \ xout = xin ^ ((x1 & 1) * 0x9908b0df); DO(x710.a, x.a, x1.a) DO(x710.b, x.b, x1.b) DO(x710.c, x.c, x1.c) DO(x710.d, x.d, x1.d) #undef DO if (version == PHP_521) { x.b ^= 0x9908b0df; x.d ^= 0x9908b0df; } else x = x710; } do { if (!(match->flags & MATCH_SKIP)) { #define DO(x, x1, xM) \ x ^= x >> 11; \ x ^= (x << 7) & 0x9d2c5680; \ x ^= (x << 15) & 
0xefc60000; \ x ^= x >> 18; DO_ALL #undef DO if (match->flags & MATCH_FULL) { #define DO(x, x1, xM) \ x >>= 1; DO_ALL #undef DO } } COMPARE(x.a, x1.a, xM.a, seed) COMPARE(x.b, x1.b, xM.b, seed + 1) COMPARE(x.c, x1.c, xM.c, seed + 2) COMPARE(x.d, x1.d, xM.d, seed + 3) if (version != PHP_521) break; version = PHP_710; x = x710; } while (1); seed += 4; } while (seed & ((1 << P) - 1)); }<LOOP-END> <OMP-START>#pragma omp parallel for default(none) private(base) shared(match, flavor, start, end, found, seed_and_0x80000000, seed_shr_30)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LorienLV/genarchbench/benchmarks/kmer-cnt/parallel.h
#pragma omp parallel for
100
obId(0); ProgressPercent progress(scheduledTasks.size()); if (progressBar) progress.advance(0); <LOOP-START>for (size_t i = 0; i < std::min(maxThreads, scheduledTasks.size()); ++i) { bool finished = false; while (!finished) { size_t expected = 0; while(true) { expected = jobId; if (jobId == scheduledTasks.size()) { finished = true; break; } if (jobId.compare_exchange_weak(expected, expected + 1)) { break; } } if (!finished) { updateFun(scheduledTasks[expected]); if (progressBar) progress.advance(); } } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LorienLV/genarchbench/benchmarks/fmi/bwa-mem2/x86_64/src/FMI_search.cpp
#pragma omp parallel for num_threads(nthreads)
100
ries(int64_t *posArray, int64_t *coordArray, uint32_t count, int32_t nthreads) { uint32_t i; // <LOOP-START>for(i = 0; i < count; i++) { int64_t pos = posArray[i]; int64_t sa_entry = sa_ms_byte[pos]; sa_entry = sa_entry << 32; sa_entry = sa_entry + sa_ls_word[pos]; //_mm_prefetch((const char *)(sa_ms_byte + pos + SAL_PFD), _MM_HINT_T0); coordArray[i] = sa_entry; }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(nthreads)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/LorienLV/genarchbench/benchmarks/fmi/bwa-mem2/sve/src/FMI_search.cpp
#pragma omp parallel for num_threads(nthreads)
100
ries(int64_t *posArray, int64_t *coordArray, uint32_t count, int32_t nthreads) { uint32_t i; // <LOOP-START>for(i = 0; i < count; i++) { int64_t pos = posArray[i]; int64_t sa_entry = sa_ms_byte[pos]; sa_entry = sa_entry << 32; sa_entry = sa_entry + sa_ls_word[pos]; //_mm_prefetch((const char *)(sa_ms_byte + pos + SAL_PFD), _MM_HINT_T0); coordArray[i] = sa_entry; }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(nthreads)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dragon1573/Parallel-GMM/Parallel-GMM/OpenMP/clustering.h
#pragma omp parallel for num_threads(threads) reduction(+: distance)
100
ce = 0; int i = 0; #pragma warning(disable: 6993) /* If Code Analysis cannot analyze this, then just don't analyze it; what kind of warning is "unable to analyze" anyway */ <LOOP-START>for (i = 0; i < dimensions; i++) { distance += pow(datasets[sampleId * dimensions + i] - centers[clusterId * dimensions + i], 2 ); }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(threads) reduction(+: distance)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dragon1573/Parallel-GMM/Parallel-GMM/OpenMP/clustering.h
#pragma omp parallel for num_threads(threads)
100
new cost*/ costs[0] = costs[1]; costs[1] = 0; /* cluster prediction */ int j = 0; <LOOP-START>for (j = 0; j < dataSize; j++) { // accumulate cost costs[1] += getCost(j); // increment the cluster counter #pragma warning(disable: 6011) sampleCounts[labels[j]] += 1; // accumulate sample values within the cluster (used to compute cluster centers) for (int k = 0; k < dimensions; k++) { nextMeans[labels[j] * dimensions + k] += datasets[j * dimensions + k]; } }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(threads)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dragon1573/Parallel-GMM/Parallel-GMM/OpenMP/clustering.h
#pragma omp parallel for num_threads(threads)
100
uble)); memset(overallMeans, 0, dimensions * sizeof(double)); /* traverse the dataset in parallel */ int i = 0; <LOOP-START>for (i = 0; i < dataSize; i++) { // assign the current sample to its cluster counts[labels[i]] += 1; for (int j = 0; j < dimensions; j++) { // deviation of the current feature const double axes = datasets[i * dimensions + j] - centers[labels[i] * dimensions + j]; // accumulate the per-feature distance to the current cluster center variances[labels[i] * dimensions + j] += pow(axes, 2); // accumulate the overall mean overallMeans[j] += datasets[i * dimensions + j]; // accumulate the per-feature variance over all samples minVariances[j] += pow(datasets[i * dimensions + j], 2); } }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(threads)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/Dragon1573/Parallel-GMM/Parallel-GMM/OpenMP/clustering.h
#pragma omp parallel for num_threads(threads)
100
register */ costs[0] = costs[1]; costs[1] = 0; /* model tuning */ int j = 0; <LOOP-START>for (j = 0; j < dataSize; j++) { // sample probability probabilities[j] = 0; // iterate over the distributions for (int k = 0; k < clusters; k++) { // probability that the sample belongs to this distribution double probability = 1; /* compute the probability density of a single Gaussian */ for (int m = 0; m < dimensions; m++) { probability *= 1 / sqrt(2 * PI * variances[k * dimensions + m]); const double square = pow( datasets[j * dimensions + m] - means[k * dimensions + m], 2 ); probability *= exp(-0.5 * square / variances[k * dimensions + m]); } probabilities[j] += priorities[k] * probability; /* how this differs from the probability above, I have no idea... */ // probability that the sample belongs to the current Gaussian const double sampleProbability = probability * priorities[k] / probabilities[j]; // accumulate the weight nextPriorities[k] += sampleProbability; // iterate over the dimensions for (int m = 0; m < dimensions; m++) { // accumulate the mean nextMeans[k * dimensions + m] += sampleProbability * datasets[j * dimensions + m]; // accumulate the variance nextVariances[k * dimensions + m] += sampleProbability * pow(datasets[j * dimensions + m], 2); } } /* 1e-20 is already below the smallest double that matters in this computation. Don't ask; it's a magic value */ // accumulate the cost contributed by this sample costs[1] += max(log10(probabilities[j]), -20); }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(threads)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.2/ordered/test_ordered_doacross_omp_cur_iteration.c
#pragma omp parallel for ordered
100
int ordered_doacross(){ int a[N]; int b[N]; int c[N]; a[0] = 0; b[0] = 0; c[0] = 0; <LOOP-START>for(int i = 1; i < N; i++){ a[i] = i; #pragma omp ordered doacross(sink: i-1) b[i] = a[i-1]; #pragma omp ordered doacross(source:omp_cur_iteration) c[i] = a[i] + b[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.2/ordered/test_ordered_doacross.c
#pragma omp parallel for ordered
100
int ordered_doacross(){ int a[N]; int b[N]; int c[N]; a[0] = 0; b[0] = 0; c[0] = 0; <LOOP-START>for(int i = 1; i < N; i++){ a[i] = i; #pragma omp ordered doacross(sink: i-1) b[i] = a[i-1]; #pragma omp ordered doacross(source:) c[i] = a[i] + b[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for ordered<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.2/runtime_calls/test_omp_in_explicit_task.c
#pragma omp parallel for
100
en called inside an explicit task"); for(int i = 0; i < N; i++){ A[i] = 1; <LOOP-START>for(int i = 0; i < N; i++){ A[i] = omp_in_explicit_task(); }<LOOP-END> <OMP-START>#pragma omp parallel for <OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/application_kernels/gemv_target_many_matrices.cpp
#pragma omp parallel for
100
out.push_back(allocate<float>(N)); } // Doing the computation { Timer local("GEMV"); <LOOP-START>for(int i=0; i < NUM_CALC; i++) { gemv(N, 1.0f, manyA[i], manyV[i], manyVout[i]); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/application_kernels/gemv_target_reduction.cpp
#pragma omp parallel for reduction(+:sum)
100
:Vout[:n]) for(int row=0; row<n; row++) { T sum = T(0); const T * A_row = A+row*n; <LOOP-START>for(int col=0; col<n; col++) { sum += A_row[col]*V[col]; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:sum)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/task/test_task_target.c
#pragma omp parallel for
100
, c[N]; int errors = 0, i; #pragma omp task shared(a) private(i) #pragma omp target map(from: a) <LOOP-START>for (i = 0; i < N; i++) a[i] = i; #pragma omp task shared(b) private(i) #pragma omp target map(from: b) #pragma omp parallel for for (i = 0; i < N; i++) b[i] = 10; #pragma omp taskwait #pragma omp task shared(c) private(i) #pragma omp target map(from: c) map(to:a,b) #pragma omp parallel for for (i = 0; i < N; i++) c[i] = a[i] + b[i]; #pragma omp taskwait for (i = 0; i < N; i++) { OMPVV_TEST_AND_SET(errors, (c[i] != i + 10)); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/task/test_task_target.c
#pragma omp parallel for
100
i < N; i++) a[i] = i; #pragma omp task shared(b) private(i) #pragma omp target map(from: b) <LOOP-START>for (i = 0; i < N; i++) b[i] = 10; #pragma omp taskwait #pragma omp task shared(c) private(i) #pragma omp target map(from: c) map(to:a,b) #pragma omp parallel for for (i = 0; i < N; i++) c[i] = a[i] + b[i]; #pragma omp taskwait for (i = 0; i < N; i++) { OMPVV_TEST_AND_SET(errors, (c[i] != i + 10)); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/task/test_task_target.c
#pragma omp parallel for
100
gma omp taskwait #pragma omp task shared(c) private(i) #pragma omp target map(from: c) map(to:a,b) <LOOP-START>for (i = 0; i < N; i++) c[i] = a[i] + b[i]; #pragma omp taskwait for (i = 0; i < N; i++) { OMPVV_TEST_AND_SET(errors, (c[i] != i + 10)); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/target_teams_distribute_parallel_for/test_target_teams_distribute_parallel_for_if_parallel_modifier.c
#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE)
100
READS_DEVICE) for (i = 0; i < N; i++) { init_num_threads_dev[i] = omp_get_num_threads(); } <LOOP-START>for (i = 0; i < N; i++) { init_num_threads_host[i] = omp_get_num_threads(); }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/4.5/target_teams_distribute_parallel_for/test_target_teams_distribute_parallel_for_if_no_modifier.c
#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE)
100
READS_DEVICE) for (i = 0; i < N; i++) { init_num_threads_dev[i] = omp_get_num_threads(); } <LOOP-START>for (i = 0; i < N; i++) { init_num_threads_host[i] = omp_get_num_threads(); }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/requires/test_requires_dynamic_allocators.c
#pragma omp parallel for simd simdlen(16) aligned(x: 64)
100
= omp_init_allocator(x_memspace, 1, x_traits); x = (int *) omp_alloc(N*sizeof(int), x_alloc); <LOOP-START>for (int i = 0; i < N; i++) { x[i] = i; }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x: 64)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/atomic/test_atomic_hint.c
#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_HOST) default(shared)
100
int errors = 0, num_threads = -1; int a[N]; for (int i = 0; i < N; i++) { a[i] = 1; } <LOOP-START>for (int i = 0; i < N; i++) { if (i == 0) { num_threads = omp_get_num_threads(); #pragma omp atomic hint(omp_sync_hint_speculative) a[1] += 1; } #pragma omp atomic hint(omp_sync_hint_speculative) a[i] += i; }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_HOST) default(shared)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/taskwait/test_taskwait_depend.c
#pragma omp parallel for
100
.h" #define N 1024 int errors = 0; int test_wrapper() { //wrapper for taskwait depend function <LOOP-START>for (int i=1; i<N; i++){ int x,y,err = 0; #pragma omp task depend(inout: x) shared(x) // 1st Task x=i; #pragma omp task depend(inout: y) shared(y) // 2nd Task y=i; #pragma omp taskwait depend(in: x) //Requires the completion of the 1st task OMPVV_TEST_AND_SET(err, x!= i); #pragma omp taskwait depend(in: x,y) //Requires the completion of both tasks OMPVV_TEST_AND_SET(err, y!=i || x!=i); #pragma omp atomic errors += err; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/declare_target/test_declare_target_parallel_for.c
#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE) shared(num_threads)
100
gma omp declare target int parallel_for_fun(int a[N], int b[N], int c[N]) { int num_threads = -1; <LOOP-START>for (int i = 0; i < N; i++) { a[i] = b[i]*c[i]; if (omp_get_thread_num() == 0) { num_threads = omp_get_num_threads(); } }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_DEVICE) shared(num_threads)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/declare_target/test_declare_target_nested.c
#pragma omp parallel for
100
lare target int test_target() { //function in declare target statement //change values on device <LOOP-START>for (i = 0; i < N; i++) { a[i] = 5; b[i] = 10; c[i] = 15; }<LOOP-END> <OMP-START>#pragma omp parallel for <OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/task/test_parallel_for_reduction_task_device.c
#pragma omp parallel for reduction(task, +: sum) num_threads(OMPVV_NUM_THREADS_DEVICE) shared(y, z, num_threads)
100
y[i] = i + 1; z[i] = 2*(i + 1); } #pragma omp target map(tofrom: sum, y, z, num_threads) { <LOOP-START>for (int i = 0; i < N; i++) { #pragma omp task in_reduction(+: sum) sum += y[i]*z[i]; if (omp_get_thread_num() == 0) { num_threads = omp_get_num_threads(); } }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(task, +: sum) num_threads(OMPVV_NUM_THREADS_DEVICE) shared(y, z, num_threads)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/task/test_parallel_for_reduction_task.c
#pragma omp parallel for reduction(task, +: sum) num_threads(OMPVV_NUM_THREADS_HOST) shared(y, z, num_threads)
100
int expected_sum = 0; for (int i = 0; i < N; i++) { y[i] = i + 1; z[i] = 2*(i + 1); } <LOOP-START>for (int i = 0; i < N; i++) { #pragma omp task in_reduction(+: sum) sum += y[i]*z[i]; if (omp_get_thread_num() == 0) { num_threads = omp_get_num_threads(); } }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(task, +: sum) num_threads(OMPVV_NUM_THREADS_HOST) shared(y, z, num_threads)<OMP-END>
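The two records above use the OpenMP 5.0 task reduction pattern: the task modifier on the reduction clause of the worksharing loop opens a reduction scope, and each explicit task joins it through in_reduction. A minimal host-only sketch of that pattern, with the array sizes and init values chosen here for illustration:

/* reduction(task, +: sum) combines the partial sums contributed by the
 * explicit tasks created inside the loop. */
#include <stdio.h>

#define N 1024

int main(void)
{
    int sum = 0;
    int y[N], z[N];

    for (int i = 0; i < N; i++) { y[i] = i + 1; z[i] = 2 * (i + 1); }

    #pragma omp parallel for reduction(task, +: sum) shared(y, z)
    for (int i = 0; i < N; i++) {
        #pragma omp task in_reduction(+: sum)       /* task joins the loop's reduction */
        sum += y[i] * z[i];
    }

    printf("sum = %d\n", sum);   /* sum of 2*(i+1)*(i+1) over i in [0, N) */
    return 0;
}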
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/parallel_for_simd/test_parallel_for_simd_atomic.c
#pragma omp parallel for simd shared(x) num_threads(OMPVV_NUM_THREADS_HOST)
100
lel_for_simd_atomic() { OMPVV_INFOMSG("test_parallel_for_simd_atomic"); int errors = 0, x = 0; <LOOP-START>for (int i = 0; i < N; i++) { #pragma omp atomic update x += 1; }<LOOP-END> <OMP-START>#pragma omp parallel for simd shared(x) num_threads(OMPVV_NUM_THREADS_HOST)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/target/test_target_imperfect_loop.c
#pragma omp parallel for collapse(2)
100
0; j < M; j++){ data2[i][j] = 0; } } #pragma omp target map(tofrom: data1, data2) { <LOOP-START>for( int i = 0; i < N; i++){ data1[i] += i; for(int j = 0; j < M; j++){ data2[i][j] += i + j; } }<LOOP-END> <OMP-START>#pragma omp parallel for collapse(2)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/allocate/test_allocate.c
#pragma omp parallel for
100
.h" #define N 1024 int test_allocate() { int errors = 0; int x[N]; #pragma omp allocate(x) <LOOP-START>for (int i = 0; i < N; i++) { x[i] = i; }<LOOP-END> <OMP-START>#pragma omp parallel for <OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/allocate/test_allocate_allocator.c
#pragma omp parallel for simd simdlen(16) aligned(x, y: 64)
100
ERROR_IF(((intptr_t) x) % 64 != 0, "Condition (intptr_t) x) %% 64 != 0 failed") <LOOP-START>for (int i = 0; i < N; i++) { x[i] = i; y[i] = 3*i; }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x, y: 64)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/allocate/test_allocate_on_device.c
#pragma omp parallel for
100
llocator(omp_default_mem_alloc) x = (int *) omp_alloc(N*sizeof(int), omp_default_mem_alloc); <LOOP-START>for (int i = 0; i < N; i++) { x[i] = 2*i; }<LOOP-END> <OMP-START>#pragma omp parallel for <OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/scan/test_scan.c
#pragma omp parallel for simd reduction(inscan, +: x) num_threads(OMPVV_NUM_THREADS_HOST)
100
ed_x = 0; int a[N]; int b[N]; for (int i = 0; i < N; i++) { a[i] = i; b[i] = 0; } <LOOP-START>for (int i = 0; i < N; i++) { x += a[i]; #pragma omp scan inclusive(x) b[i] = x; }<LOOP-END> <OMP-START>#pragma omp parallel for simd reduction(inscan, +: x) num_threads(OMPVV_NUM_THREADS_HOST)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/scan/test_scan.c
#pragma omp parallel for simd reduction(inscan, +: x) num_threads(OMPVV_NUM_THREADS_HOST)
100
ed_x = 0; int a[N]; int b[N]; for (int i = 0; i < N; i++) { a[i] = i; b[i] = 0; } <LOOP-START>for (int i = 0; i < N; i++) { b[i] = x; #pragma omp scan exclusive(x) x += a[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for simd reduction(inscan, +: x) num_threads(OMPVV_NUM_THREADS_HOST)<OMP-END>
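The two scan records above show both forms of the OpenMP 5.0 scan directive under reduction(inscan, ...). A minimal sketch combining them, assuming an OpenMP 5.0 compiler (array names and N are illustrative):

/* inclusive(x): b[i] sees the running total including a[i];
 * exclusive(x): b[i] sees the running total of the elements before i. */
#include <stdio.h>

#define N 8

int main(void)
{
    int a[N], incl[N], excl[N];
    for (int i = 0; i < N; i++) a[i] = i;

    int x = 0;
    #pragma omp parallel for simd reduction(inscan, +: x)
    for (int i = 0; i < N; i++) {
        x += a[i];
        #pragma omp scan inclusive(x)
        incl[i] = x;                 /* incl[i] = a[0] + ... + a[i] */
    }

    x = 0;
    #pragma omp parallel for simd reduction(inscan, +: x)
    for (int i = 0; i < N; i++) {
        excl[i] = x;                 /* excl[i] = a[0] + ... + a[i-1] */
        #pragma omp scan exclusive(x)
        x += a[i];
    }

    for (int i = 0; i < N; i++)
        printf("i=%d incl=%d excl=%d\n", i, incl[i], excl[i]);
    return 0;
}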
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/parallel_for/test_parallel_for_notequals.c
#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_HOST) shared(x, y, z)
100
t z[N]; for (int i = 0; i < N; i++) { x[i] = 1; y[i] = i + 1; z[i] = 2*(i + 1); } <LOOP-START>for (int i = 0; i != N; i++) { x[i] += y[i]*z[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for num_threads(OMPVV_NUM_THREADS_HOST) shared(x, y, z)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/parallel_for/test_parallel_for_order_concurrent.c
#pragma omp parallel for order(concurrent) num_threads(OMPVV_NUM_THREADS_HOST) shared(x, y, z)
100
t z[N]; for (int i = 0; i < N; i++) { x[i] = 1; y[i] = i + 1; z[i] = 2*(i + 1); } <LOOP-START>for (int i = 0; i < N; i++) { x[i] += y[i]*z[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for order(concurrent) num_threads(OMPVV_NUM_THREADS_HOST) shared(x, y, z)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.0/parallel_for/test_parallel_for_allocate.c
#pragma omp parallel for allocate(x_alloc: x) private(x) shared(result) num_threads(OMPVV_NUM_THREADS_HOST)
100
for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { result[i][j] = -1; } } <LOOP-START>for (int i = 0; i < N; i++) { x = (int *) malloc(N*sizeof(int)); if (x != NULL) { #pragma omp simd simdlen(16) aligned(x: 64) for (int j = 0; j < N; j++) { x[j] = j*i; } for (int j = 0; j < N; j++) { result[i][j] = x[j]; } free(x); successful_alloc++; } }<LOOP-END> <OMP-START>#pragma omp parallel for allocate(x_alloc: x) private(x) shared(result) num_threads(OMPVV_NUM_THREADS_HOST)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/atomic/test_atomic_compare.c
#pragma omp parallel for shared(pmax)
100
s max through non-parallel methods if(arr[i] > smax){ smax = arr[i]; } } <LOOP-START>for(int i = 0; i<N; i++){ #pragma omp atomic compare if(arr[i] > pmax){ pmax = arr[i]; } }<LOOP-END> <OMP-START>#pragma omp parallel for shared(pmax) <OMP-END>
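The record above uses the OpenMP 5.1 atomic compare construct to compute a maximum without a critical section. A minimal standalone sketch of the same conditional-update form, with the array contents here made up for illustration:

/* The whole if-statement is one atomic compare-and-store, so concurrent
 * threads cannot lose updates to pmax. */
#include <stdio.h>

#define N 1000

int main(void)
{
    int arr[N];
    for (int i = 0; i < N; i++) arr[i] = (i * 37) % N;   /* arbitrary values */

    int pmax = arr[0];
    #pragma omp parallel for shared(pmax)
    for (int i = 0; i < N; i++) {
        #pragma omp atomic compare
        if (arr[i] > pmax) { pmax = arr[i]; }
    }

    printf("max = %d\n", pmax);
    return 0;
}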
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/dispatch/test_dispatch.c
#pragma omp parallel for
100
t *arr); #pragma omp declare variant(add_two) match(construct={dispatch}) void add(int *arr){ <LOOP-START>for (int i = 0; i < N; i++){ // Base function adds 1 to array values arr[i] = arr[i]+1; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/dispatch/test_dispatch_is_device_ptr.c
#pragma omp parallel for
100
e device pointer"); return 1; } #pragma omp target is_device_ptr(arr) { <LOOP-START>for(int i = 0; i < N; i++){ arr[i] = i; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/dispatch/test_dispatch_device.c
#pragma omp parallel for
100
nt *arr); #pragma omp declare variant(add_dev) match(construct={dispatch}) void add(int *arr){ <LOOP-START>for (int i = 0; i < N; i++){ // Base function adds 1 to array values arr[i] = arr[i]+1; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/order/test_parallel_for_order_unconstrained.c
#pragma omp parallel for order(unconstrained:concurrent)
100
= 0; i < N; i++) { x[i] = i; } OMPVV_TEST_OFFLOADING; #pragma omp target map(tofrom: x) { <LOOP-START>for (int i = 0; i < N; i++) { x[i] = x[i] + 2; }<LOOP-END> <OMP-START>#pragma omp parallel for order(unconstrained:concurrent)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_calloc_host.c
#pragma omp parallel for
100
null"); return (1); } int not_init_to_zero = 0; int not_correct_updated_values = 0; <LOOP-START>for (int i = 0; i < N; i++) { if (x[i] != 0) { #pragma omp atomic write not_init_to_zero = 1; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_calloc_host.c
#pragma omp parallel for
100
+) { if (x[i] != 0) { #pragma omp atomic write not_init_to_zero = 1; } } <LOOP-START>for (int i = 0; i < N; i++) { x[i] = i; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_calloc_host.c
#pragma omp parallel for
100
= 1; } } #pragma omp parallel for for (int i = 0; i < N; i++) { x[i] = i; } <LOOP-START>for (int i = 0; i < N; i++) { if (x[i] != i) { #pragma omp atomic write not_correct_updated_values = 1; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_target_aligned_calloc.c
#pragma omp parallel for simd simdlen(16) aligned(x: 64)
100
ndition ((intptr_t)(x))%%64 != 0 failed. The memory does not seem to be properly aligned."); <LOOP-START>for (int i = 0; i < N; i++) { x[i] = i; }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x: 64)<OMP-END>
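The aligned-allocation records in this block pair an aligned allocator call with aligned(x: 64) on a simd loop. A minimal host-side sketch, assuming an OpenMP 5.1 runtime that provides omp_aligned_alloc and omp_free (the error handling is illustrative):

/* The 64-byte alignment requested from the allocator is what makes the
 * aligned(x: 64) clause on the simd loop legal. */
#include <stdio.h>
#include <stdint.h>
#include <omp.h>

#define N 1024

int main(void)
{
    int *x = (int *) omp_aligned_alloc(64, N * sizeof(int), omp_default_mem_alloc);
    if (x == NULL || ((intptr_t) x) % 64 != 0) {
        printf("allocation failed or not 64-byte aligned\n");
        return 1;
    }

    #pragma omp parallel for simd simdlen(16) aligned(x: 64)
    for (int i = 0; i < N; i++) {
        x[i] = i;
    }

    int errors = 0;
    for (int i = 0; i < N; i++) {
        if (x[i] != i) errors++;
    }
    printf("errors = %d\n", errors);

    omp_free(x, omp_default_mem_alloc);
    return 0;
}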
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_target_aligned_calloc.c
#pragma omp parallel for simd simdlen(16) aligned(x: 64)
100
imd simdlen(16) aligned(x: 64) for (int i = 0; i < N; i++) { x[i] = i; } <LOOP-START>for (int i = 0; i < N; i++) { if (x[i] != i) { #pragma omp atomic write not_correct_array_values = 1; } }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x: 64)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_omp_target_aligned_alloc_device.c
#pragma omp parallel for simd simdlen(16) aligned(x: 64)
100
OMPVV_ERROR_IF(((intptr_t)(x))%64 != 0, " Condition ((intptr_t)(x))%%64 != 0 failed "); <LOOP-START>for (int i = 0; i < N; i++) { x[i] = i; }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x: 64)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_omp_target_aligned_alloc_device.c
#pragma omp parallel for simd simdlen(16) aligned(x: 64)
100
imd simdlen(16) aligned(x: 64) for (int i = 0; i < N; i++) { x[i] = i; } <LOOP-START>for (int i = 0; i < N; i++) { if (x[i] != i) { #pragma omp atomic write not_correct_array_values = 1; } }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x: 64)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_target_calloc.c
#pragma omp parallel for
100
s++; } else { int not_init_to_zero = 0; int not_correct_updated_values = 0; <LOOP-START>for (int i = 0; i < N; i++) { if (x[i] != 0) { #pragma omp atomic write not_init_to_zero = 1; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_target_calloc.c
#pragma omp parallel for
100
0) { #pragma omp atomic write not_init_to_zero = 1; } } <LOOP-START>for (int i = 0; i < N; i++) { x[i] = i; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_target_calloc.c
#pragma omp parallel for
100
#pragma omp parallel for for (int i = 0; i < N; i++) { x[i] = i; } <LOOP-START>for (int i = 0; i < N; i++) { if (x[i] != i) { #pragma omp atomic write not_correct_updated_values = 1; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_omp_aligned_alloc_host.c
#pragma omp parallel for simd simdlen(16) aligned(x: 64)
100
_SET_VERBOSE(errors, ((intptr_t)(x))%64 != 0); int values_did_not_match_expected_changes = 0; <LOOP-START>for (int i = 0; i < N; i++) { x[i] = i; }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x: 64)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_omp_aligned_alloc_host.c
#pragma omp parallel for simd simdlen(16) aligned(x: 64)
100
p parallel for simd simdlen(16) aligned(x: 64) for (int i = 0; i < N; i++) { x[i] = i; } <LOOP-START>for (int i = 0; i < N; i++) { if (x[i] != i) { values_did_not_match_expected_changes = 1; } }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x: 64)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_allocate_allocator_align.c
#pragma omp parallel for simd simdlen(16) aligned(x: 64)
100
omp allocate(x) allocator(omp_default_mem_alloc) align(64) #pragma omp target map(from:x[:N]) { <LOOP-START>for (int i = 0; i < N; i++) { x[i] = i; }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x: 64)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_aligned_calloc.c
#pragma omp parallel for simd simdlen(16) aligned(x: 64)
100
Condition ((intptr_t)(x))%%64 != 0 failed. The memory does not seem to be properly aligned."); <LOOP-START>for (int i = 0; i < N; i++) { x[i] = i; }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x: 64)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/OpenMP-Validation-and-Verification/OpenMP_VV/tests/5.1/allocate/test_aligned_calloc.c
#pragma omp parallel for simd simdlen(16) aligned(x: 64)
100
el for simd simdlen(16) aligned(x: 64) for (int i = 0; i < N; i++) { x[i] = i; } <LOOP-START>for (int i = 0; i < N; i++) { if (x[i] != i) { #pragma omp atomic write not_correct_array_values = 1; } }<LOOP-END> <OMP-START>#pragma omp parallel for simd simdlen(16) aligned(x: 64)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_static.c
#pragma omp parallel for schedule(static)
100
DATA_TYPE POLYBENCH_2D(A, N, N, n, n)) { int i, j, k; for (k = 0; k < _PB_N; k++) { <LOOP-START>for (j = k + 1; j < _PB_N; j++) A[k][j] = A[k][j] / A[k][k]; #pragma omp parallel for schedule(static) for (i = k + 1; i < _PB_N; i++) for (j = k + 1; j < _PB_N; j++) A[i][j] = A[i][j] - A[i][k] * A[k][j]; } } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n); /* Initialize array(s). */ init_array(n, POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_lu(n, POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_static.c
#pragma omp parallel for schedule(static)
100
lel for schedule(static) for (j = k + 1; j < _PB_N; j++) A[k][j] = A[k][j] / A[k][k]; <LOOP-START>for (i = k + 1; i < _PB_N; i++) for (j = k + 1; j < _PB_N; j++) A[i][j] = A[i][j] - A[i][k] * A[k][j]; } } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n); /* Initialize array(s). */ init_array(n, POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_lu(n, POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static)<OMP-END>
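The lu_for_static.c records above parallelize right-looking LU factorization: the outer k loop stays sequential because step k depends on step k-1, while the row normalization and the trailing-submatrix update are each worksharing loops. A minimal sketch of that pattern without the PolyBench macros (N, the plain 2-D array, and the diagonally dominant init are stand-ins):

/* Same structure as the kernel in the records: sequential k, parallel j and i loops. */
#include <stdio.h>

#define N 256

static double A[N][N];

static void kernel_lu(void)
{
    for (int k = 0; k < N; k++) {
        #pragma omp parallel for schedule(static)
        for (int j = k + 1; j < N; j++)
            A[k][j] = A[k][j] / A[k][k];          /* normalize row k */

        #pragma omp parallel for schedule(static)
        for (int i = k + 1; i < N; i++)           /* update trailing submatrix */
            for (int j = k + 1; j < N; j++)
                A[i][j] = A[i][j] - A[i][k] * A[k][j];
    }
}

int main(void)
{
    /* Diagonally dominant init keeps the factorization numerically safe. */
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            A[i][j] = (i == j) ? (double) N : 1.0 / (i + j + 1);

    kernel_lu();
    printf("A[N-1][N-1] = %f\n", A[N - 1][N - 1]);
    return 0;
}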
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_dynamic.c
#pragma omp parallel for schedule(dynamic)
100
DATA_TYPE POLYBENCH_2D(A, N, N, n, n)) { int i, j, k; for (k = 0; k < _PB_N; k++) { <LOOP-START>for (j = k + 1; j < _PB_N; j++) A[k][j] = A[k][j] / A[k][k]; #pragma omp parallel for schedule(dynamic) for (i = k + 1; i < _PB_N; i++) for (j = k + 1; j < _PB_N; j++) A[i][j] = A[i][j] - A[i][k] * A[k][j]; } } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n); /* Initialize array(s). */ init_array(n, POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_lu(n, POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_dynamic.c
#pragma omp parallel for schedule(dynamic)
100
el for schedule(dynamic) for (j = k + 1; j < _PB_N; j++) A[k][j] = A[k][j] / A[k][k]; <LOOP-START>for (i = k + 1; i < _PB_N; i++) for (j = k + 1; j < _PB_N; j++) A[i][j] = A[i][j] - A[i][k] * A[k][j]; } } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n); /* Initialize array(s). */ init_array(n, POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_lu(n, POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(dynamic)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_wrong_for.c
#pragma omp parallel for
100
kernel_lu(int n, DATA_TYPE POLYBENCH_2D(A, N, N, n, n)) { int i, j, k; <LOOP-START>for (k = 0; k < _PB_N; k++) { for (j = k + 1; j < _PB_N; j++) A[k][j] = A[k][j] / A[k][k]; for (i = k + 1; i < _PB_N; i++) for (j = k + 1; j < _PB_N; j++) A[i][j] = A[i][j] - A[i][k] * A[k][j]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_gpu.c
#pragma omp parallel for
100
#pragma omp target data map(tofrom:A) { for (k = 0; k < _PB_N; k++) { <LOOP-START>for (j = k + 1; j < _PB_N; j++) A[k][j] = A[k][j] / A[k][k]; #pragma omp parallel for for (i = k + 1; i < _PB_N; i++) for (j = k + 1; j < _PB_N; j++) A[i][j] = A[i][j] - A[i][k] * A[k][j]; } } } } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n); /* Initialize array(s). */ init_array(n, POLYBENCH_ARRAY(A)); //my_print_array(n, POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_lu(n, POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A))); printf("\n"); //my_print_array(n, POLYBENCH_ARRAY(A)); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_gpu.c
#pragma omp parallel for
100
lel for for (j = k + 1; j < _PB_N; j++) A[k][j] = A[k][j] / A[k][k]; <LOOP-START>for (i = k + 1; i < _PB_N; i++) for (j = k + 1; j < _PB_N; j++) A[i][j] = A[i][j] - A[i][k] * A[k][j]; } } } } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n); /* Initialize array(s). */ init_array(n, POLYBENCH_ARRAY(A)); //my_print_array(n, POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_lu(n, POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A))); printf("\n"); //my_print_array(n, POLYBENCH_ARRAY(A)); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_guided.c
#pragma omp parallel for schedule(guided)
100
DATA_TYPE POLYBENCH_2D(A, N, N, n, n)) { int i, j, k; for (k = 0; k < _PB_N; k++) { <LOOP-START>for (j = k + 1; j < _PB_N; j++) A[k][j] = A[k][j] / A[k][k]; #pragma omp parallel for schedule(guided) for (i = k + 1; i < _PB_N; i++) for (j = k + 1; j < _PB_N; j++) A[i][j] = A[i][j] - A[i][k] * A[k][j]; } } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n); /* Initialize array(s). */ init_array(n, POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_lu(n, POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/antoniopelusi/lu-solver/OpenMP/lu_for_guided.c
#pragma omp parallel for schedule(guided)
100
lel for schedule(guided) for (j = k + 1; j < _PB_N; j++) A[k][j] = A[k][j] / A[k][k]; <LOOP-START>for (i = k + 1; i < _PB_N; i++) for (j = k + 1; j < _PB_N; j++) A[i][j] = A[i][j] - A[i][k] * A[k][j]; } } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n); /* Initialize array(s). */ init_array(n, POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_lu(n, POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); return 0; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(guided)<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
100
--------------------------------------*/ if (alpha == 0.0) { #ifdef HYPRE_USING_OPENMP <LOOP-START>for (i = 0; i < num_rows * num_vectors; i++) { y_data[i] *= beta; }<LOOP-END> <OMP-START>#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
100
alpha; if (temp != 1.0) { if (temp == 0.0) { #ifdef HYPRE_USING_OPENMP <LOOP-START>for (i = 0; i < num_rows * num_vectors; i++) { y_data[i] = 0.0; }<LOOP-END> <OMP-START>#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
100
um_vectors; i++) { y_data[i] = 0.0; } } else { #ifdef HYPRE_USING_OPENMP <LOOP-START>for (i = 0; i < num_rows * num_vectors; i++) { y_data[i] *= temp; }<LOOP-END> <OMP-START>#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
100
--------------------------------------*/ if (alpha != 1.0) { #ifdef HYPRE_USING_OPENMP <LOOP-START>for (i = 0; i < num_rows * num_vectors; i++) { y_data[i] *= alpha; }<LOOP-END> <OMP-START>#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
100
--------------------------------------*/ if (alpha == 0.0) { #ifdef HYPRE_USING_OPENMP <LOOP-START>for (i = 0; i < num_cols * num_vectors; i++) { y_data[i] *= beta; }<LOOP-END> <OMP-START>#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
100
alpha; if (temp != 1.0) { if (temp == 0.0) { #ifdef HYPRE_USING_OPENMP <LOOP-START>for (i = 0; i < num_cols * num_vectors; i++) { y_data[i] = 0.0; }<LOOP-END> <OMP-START>#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
100
um_vectors; i++) { y_data[i] = 0.0; } } else { #ifdef HYPRE_USING_OPENMP <LOOP-START>for (i = 0; i < num_cols * num_vectors; i++) { y_data[i] *= temp; }<LOOP-END> <OMP-START>#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/csr_matmultivec.c
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
100
--------------------------------------*/ if (alpha != 1.0) { #ifdef HYPRE_USING_OPENMP <LOOP-START>for (i = 0; i < num_cols * num_vectors; i++) { y_data[i] *= alpha; }<LOOP-END> <OMP-START>#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
100
ectorData(v); if (v->num_active_vectors == v->num_vectors) { #ifdef HYPRE_USING_OPENMP <LOOP-START>for (j = 0; j < v->num_vectors * size; j++) { vector_data[j] = value; }<LOOP-END> <OMP-START>#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
100
e_indices[i] * size; end_offset = start_offset + size; #ifdef HYPRE_USING_OPENMP <LOOP-START>for (j = start_offset; j < end_offset; j++) { vector_data[j] = value; }<LOOP-END> <OMP-START>#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
100
ind[i] * size; dest = y_data + y_active_ind[i] * size; #ifdef HYPRE_USING_OPENMP <LOOP-START>for (j = 0; j < size; j++) { dest[j] += alpha * src[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
100
ve_ind[i] * size; current_alpha = alpha[ al_active_ind[i] ]; #ifdef HYPRE_USING_OPENMP <LOOP-START>for (j = 0; j < size; j++) { dest[j] = current_alpha * src[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c
#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE
100
_data + x_active_ind[i] * size; current_product = 0.0; #ifdef HYPRE_USING_OPENMP <LOOP-START>for (k = 0; k < size; k++) { current_product += x_ptr[k] * hypre_conj(y_ptr[k]); }<LOOP-END> <OMP-START>#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE<OMP-END>
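The seq_multivector.c records above compute per-vector inner products with reduction(+:current_product). A minimal sketch of the same pattern with plain real data, where hypre_conj would be a no-op (the sizes and init values are illustrative):

/* Each thread accumulates a private partial sum; the reduction clause
 * combines them into current_product. */
#include <stdio.h>

#define SIZE 4096

int main(void)
{
    double x[SIZE], y[SIZE];
    for (int k = 0; k < SIZE; k++) { x[k] = 1.0; y[k] = 2.0; }

    double current_product = 0.0;
    #pragma omp parallel for reduction(+: current_product)
    for (int k = 0; k < SIZE; k++) {
        current_product += x[k] * y[k];
    }

    printf("dot = %f (expected %f)\n", current_product, 2.0 * SIZE);
    return 0;
}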
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c
#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE
100
tr = y_data + y_active_ind[i] * size; current_product = 0.0; #ifdef HYPRE_USING_OPENMP <LOOP-START>for (k = 0; k < size; k++) { current_product += x_ptr[k] * hypre_conj(y_ptr[k]); }<LOOP-END> <OMP-START>#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
100
r = x_data + x_active_ind[0] * size; current_coef = *rVal++; #ifdef HYPRE_USING_OPENMP <LOOP-START>for (k = 0; k < size; k++) { y_ptr[k] = current_coef * x_ptr[k]; }<LOOP-END> <OMP-START>#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
100
data + x_active_ind[i] * size; current_coef = *rVal++; #ifdef HYPRE_USING_OPENMP <LOOP-START>for (k = 0; k < size; k++) { y_ptr[k] += current_coef * x_ptr[k]; }<LOOP-END> <OMP-START>#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/multivector/seq_multivector.c
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
100
data + x_active_ind[i] * size; current_coef = *rVal++; #ifdef HYPRE_USING_OPENMP <LOOP-START>for (k = 0; k < size; k++) { y_ptr[k] += current_coef * x_ptr[k]; }<LOOP-END> <OMP-START>#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE<OMP-END>
/ascldap/users/netienn/Research/HPC-Coder/data/ClonedRepos/hypre-space/hypre/src/IJ_mv/IJMatrix_parcsr.c
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
100
} if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix)) { #ifdef HYPRE_USING_OPENMP <LOOP-START>for (i = 0; i < local_num_rows; i++) { hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i]; hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE<OMP-END>